From bdc0d96d79d11a04d87069e116a14b62f61a04c2 Mon Sep 17 00:00:00 2001
From: Vladimir Lavor
Date: Wed, 4 Jul 2018 12:58:59 +0200
Subject: [PATCH 001/174] api for ACL vppcalls

Signed-off-by: Vladimir Lavor
---
 plugins/linux/ifplugin/interface_config.go    |   2 +-
 plugins/rest/rest_handlers.go                 |  20 +-
 plugins/vpp/aclplugin/acl_config.go           |  67 ++-
 plugins/vpp/aclplugin/acl_config_test.go      |   4 +-
 plugins/vpp/aclplugin/data_resync.go          |  21 +-
 .../vpp/aclplugin/vppcalls/acl_vppcalls.go    | 107 ++---
 .../aclplugin/vppcalls/acl_vppcalls_test.go   |  83 ++--
 .../vpp/aclplugin/vppcalls/api_vppcalls.go    |  90 ++++
 plugins/vpp/aclplugin/vppcalls/doc.go         |   3 +-
 .../{vppdump => vppcalls}/dump_vppcalls.go    | 401 ++++++++----------
 .../dump_vppcalls_test.go                     |  64 ++-
 .../aclplugin/vppcalls/interfaces_vppcalls.go | 198 ++++-----
 .../vppcalls/interfaces_vppcalls_test.go      |  53 +--
 plugins/vpp/aclplugin/vppcalls/vpp_channel.go |   2 +
 plugins/vpp/aclplugin/vppdump/doc.go          |   3 -
 plugins/vpp/aclplugin/vppdump/vpp_channel.go  |  28 --
 plugins/vpp/plugin_impl_vpp.go                |  35 +-
 17 files changed, 580 insertions(+), 601 deletions(-)
 create mode 100644 plugins/vpp/aclplugin/vppcalls/api_vppcalls.go
 rename plugins/vpp/aclplugin/{vppdump => vppcalls}/dump_vppcalls.go (68%)
 rename plugins/vpp/aclplugin/{vppdump => vppcalls}/dump_vppcalls_test.go (84%)
 delete mode 100644 plugins/vpp/aclplugin/vppdump/doc.go
 delete mode 100644 plugins/vpp/aclplugin/vppdump/vpp_channel.go

diff --git a/plugins/linux/ifplugin/interface_config.go b/plugins/linux/ifplugin/interface_config.go
index d3ebceefd5..0d8a018481 100644
--- a/plugins/linux/ifplugin/interface_config.go
+++ b/plugins/linux/ifplugin/interface_config.go
@@ -73,7 +73,7 @@ type LinuxInterfaceConfigurator struct {
 	ifHandler linuxcalls.NetlinkAPI
 	nsHandler nsplugin.NamespaceAPI
 
-	//Timer used to measure and store time
+	// Timer used to measure and store time
	stopwatch *measure.Stopwatch
 }
 
diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go
index 2edb002e6d..23afcb691c 100644
--- a/plugins/rest/rest_handlers.go
+++ b/plugins/rest/rest_handlers.go
@@ -30,7 +30,6 @@ import (
 	"github.com/unrolled/render"
 
 	aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls"
-	acldump "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppdump"
 	ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppdump"
 	l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppdump"
 	l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppdump"
@@ -230,13 +229,14 @@ func (plugin *Plugin) interfaceACLGetHandler(formatter *render.Render) http.Hand
 		defer ch.Close()
 
 		swIndex := uint32(swIndexuInt64)
-		res, err := acldump.DumpInterfaceIPAcls(plugin.Deps.Log, swIndex, ch, nil)
+		aclHandler := aclcalls.NewAclVppHandler(ch, ch, nil)
+		res, err := aclHandler.DumpInterfaceIPAcls(swIndex)
 		if err != nil {
 			plugin.Deps.Log.Errorf("Error: %v", err)
 			formatter.JSON(w, http.StatusInternalServerError, err)
 			return
 		}
-		res, err = acldump.DumpInterfaceMACIPAcls(plugin.Log, swIndex, ch, nil)
+		res, err = aclHandler.DumpInterfaceMACIPAcls(swIndex)
 		if err != nil {
 			plugin.Log.Errorf("Error: %v", err)
 			formatter.JSON(w, http.StatusInternalServerError, err)
@@ -262,8 +262,8 @@ func (plugin *Plugin) ipACLGetHandler(formatter *render.Render) http.HandlerFunc
 			return
 		}
 		defer ch.Close()
-
-		res, err := acldump.DumpIPACL(nil, plugin.Deps.Log, ch, nil)
+		aclHandler := aclcalls.NewAclVppHandler(ch, ch, nil)
+		res, err := aclHandler.DumpIPACL(nil)
 		if err != nil {
 			plugin.Deps.Log.Errorf("Error: %v", err)
 			formatter.JSON(w, http.StatusInternalServerError, err)
@@ -287,8 +287,8 @@ func (plugin *Plugin) macipACLGetHandler(formatter *render.Render) http.HandlerF
 			formatter.JSON(w, http.StatusInternalServerError, err)
 			return
 		}
-
-		res, err := acldump.DumpMACIPACL(nil, plugin.Deps.Log, ch, nil)
+		aclHandler := aclcalls.NewAclVppHandler(ch, ch, nil)
+		res, err := aclHandler.DumpMACIPACL(nil)
 		if err != nil {
 			plugin.Log.Errorf("Error: %v", err)
 			formatter.JSON(w, http.StatusInternalServerError, err)
@@ -400,7 +400,8 @@ func (plugin *Plugin) ipACLPostHandler(formatter *render.Render) http.HandlerFun
 		var aclIndex struct {
 			Idx uint32 `json:"acl_index"`
 		}
-		aclIndex.Idx, err = aclcalls.AddIPAcl(aclParam.Rules, aclParam.AclName, plugin.Deps.Log, ch, nil)
+		aclHandler := aclcalls.NewAclVppHandler(ch, nil, nil)
+		aclIndex.Idx, err = aclHandler.AddIPAcl(aclParam.Rules, aclParam.AclName)
 		if err != nil {
 			plugin.Deps.Log.Errorf("Error: %v", err)
 			formatter.JSON(w, http.StatusInternalServerError, aclIndex)
@@ -441,7 +442,8 @@ func (plugin *Plugin) macipACLPostHandler(formatter *render.Render) http.Handler
 		var aclIndex struct {
 			Idx uint32 `json:"acl_index"`
 		}
-		aclIndex.Idx, err = aclcalls.AddMacIPAcl(aclParam.Rules, aclParam.AclName, plugin.Deps.Log, ch, nil)
+		aclHandler := aclcalls.NewAclVppHandler(ch, nil, nil)
+		aclIndex.Idx, err = aclHandler.AddMacIPAcl(aclParam.Rules, aclParam.AclName)
 		if err != nil {
 			plugin.Log.Errorf("Error: %v", err)
 			formatter.JSON(w, http.StatusInternalServerError, aclIndex)
diff --git a/plugins/vpp/aclplugin/acl_config.go b/plugins/vpp/aclplugin/acl_config.go
index 4d166770fa..88e0d249b6 100644
--- a/plugins/vpp/aclplugin/acl_config.go
+++ b/plugins/vpp/aclplugin/acl_config.go
@@ -29,7 +29,6 @@ import (
 	"github.com/ligato/vpp-agent/plugins/govppmux"
 	"github.com/ligato/vpp-agent/plugins/vpp/aclplugin/aclidx"
 	"github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls"
-	"github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppdump"
 	"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx"
 	"github.com/ligato/vpp-agent/plugins/vpp/model/acl"
 )
@@ -64,20 +63,20 @@ type ACLConfigurator struct {
 	// Cache for ACL un-configured interfaces
 	ifCache []*ACLIfCacheEntry
 
-	// VPP binary api call helper
-	vppCalls *vppcalls.ACLInterfacesVppCalls
-
 	// VPP channels
 	vppChan     *api.Channel
 	vppDumpChan *api.Channel
 
+	// ACL VPP calls handler
+	aclHandler vppcalls.AclVppAPI
+
 	// Timer used to measure and store time
 	stopwatch *measure.Stopwatch
 }
 
 // Init goroutines, channels and mappings.
func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, - enableStopwatch bool) (err error) { + stopwatch *measure.Stopwatch) (err error) { // Logger plugin.log = logger.NewLogger("-acl-plugin") plugin.log.Infof("Initializing ACL configurator") @@ -97,10 +96,8 @@ func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppm return err } - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("ACLConfigurator", plugin.log) - } + // ACL binary api handler + plugin.aclHandler = vppcalls.NewAclVppHandler(plugin.vppChan, plugin.vppDumpChan, stopwatch) // Message compatibility if err = plugin.vppChan.CheckMessageCompatibility(vppcalls.AclMessages...); err != nil { @@ -108,12 +105,12 @@ func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppm return err } - // VPP calls helper object - plugin.vppCalls = vppcalls.NewACLInterfacesVppCalls(plugin.log, plugin.vppChan, plugin.ifIndexes, plugin.stopwatch) + // Configurator-wide stopwatch instance + plugin.stopwatch = stopwatch // Get VPP ACL plugin version var aclVersion string - if aclVersion, err = vppcalls.GetAclPluginVersion(plugin.vppChan, plugin.stopwatch); err != nil { + if aclVersion, err = plugin.aclHandler.GetAclPluginVersion(); err != nil { return err } plugin.log.Infof("VPP ACL plugin version is %s", aclVersion) @@ -156,7 +153,7 @@ func (plugin *ACLConfigurator) ConfigureACL(acl *acl.AccessLists_Acl) error { var vppACLIndex uint32 var err error if isL2MacIP { - vppACLIndex, err = vppcalls.AddMacIPAcl(rules, acl.AclName, plugin.log, plugin.vppChan, plugin.stopwatch) + vppACLIndex, err = plugin.aclHandler.AddMacIPAcl(rules, acl.AclName) if err != nil { return err } @@ -165,7 +162,7 @@ func (plugin *ACLConfigurator) ConfigureACL(acl *acl.AccessLists_Acl) error { plugin.l2AclIndexes.RegisterName(acl.AclName, agentACLIndex, acl) plugin.log.Debugf("ACL %v registered with index %v", acl.AclName, agentACLIndex) } else { - vppACLIndex, err = vppcalls.AddIPAcl(rules, acl.AclName, plugin.log, plugin.vppChan, plugin.stopwatch) + vppACLIndex, err = plugin.aclHandler.AddIPAcl(rules, acl.AclName) if err != nil { return err } @@ -179,18 +176,18 @@ func (plugin *ACLConfigurator) ConfigureACL(acl *acl.AccessLists_Acl) error { if ifaces := acl.GetInterfaces(); ifaces != nil { if isL2MacIP { aclIfIndices := plugin.getOrCacheInterfaces(acl.Interfaces.Ingress, vppACLIndex, L2) - err := plugin.vppCalls.SetMacIPAclToInterface(vppACLIndex, aclIfIndices) + err := plugin.aclHandler.SetMacIPAclToInterface(vppACLIndex, aclIfIndices) if err != nil { return err } } else { aclIfInIndices := plugin.getOrCacheInterfaces(acl.Interfaces.Ingress, vppACLIndex, INGRESS) - err = plugin.vppCalls.SetACLToInterfacesAsIngress(vppACLIndex, aclIfInIndices) + err = plugin.aclHandler.SetACLToInterfacesAsIngress(vppACLIndex, aclIfInIndices) if err != nil { return err } aclIfEgIndices := plugin.getOrCacheInterfaces(acl.Interfaces.Egress, vppACLIndex, EGRESS) - err = plugin.vppCalls.SetACLToInterfacesAsEgress(vppACLIndex, aclIfEgIndices) + err = plugin.aclHandler.SetACLToInterfacesAsEgress(vppACLIndex, aclIfEgIndices) if err != nil { return err } @@ -229,14 +226,14 @@ func (plugin *ACLConfigurator) ModifyACL(oldACL, newACL *acl.AccessLists_Acl) (e } if isL2MacIP { // L2 ACL - err := vppcalls.ModifyMACIPAcl(vppACLIndex, rules, newACL.AclName, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.aclHandler.ModifyMACIPAcl(vppACLIndex, rules, 
newACL.AclName) if err != nil { return err } // There is no need to update index because modified ACL keeps the old one. } else { // L3/L4 ACL can be modified directly. - err := vppcalls.ModifyIPAcl(vppACLIndex, rules, newACL.AclName, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.aclHandler.ModifyIPAcl(vppACLIndex, rules, newACL.AclName) if err != nil { return err } @@ -248,7 +245,7 @@ func (plugin *ACLConfigurator) ModifyACL(oldACL, newACL *acl.AccessLists_Acl) (e // Remove L2 ACL from old interfaces. if oldACL.Interfaces != nil { - err := plugin.vppCalls.RemoveMacIPIngressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(oldACL.Interfaces.Ingress)) + err := plugin.aclHandler.RemoveMacIPIngressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(oldACL.Interfaces.Ingress)) if err != nil { return err } @@ -256,7 +253,7 @@ func (plugin *ACLConfigurator) ModifyACL(oldACL, newACL *acl.AccessLists_Acl) (e // Put L2 ACL to new interfaces. if newACL.Interfaces != nil { aclMacInterfaces := plugin.getOrCacheInterfaces(newACL.Interfaces.Ingress, vppACLIndex, L2) - err := plugin.vppCalls.SetMacIPAclToInterface(vppACLIndex, aclMacInterfaces) + err := plugin.aclHandler.SetMacIPAclToInterface(vppACLIndex, aclMacInterfaces) if err != nil { return err } @@ -271,25 +268,25 @@ func (plugin *ACLConfigurator) ModifyACL(oldACL, newACL *acl.AccessLists_Acl) (e addedEgInterfaces, removedEgInterfaces := diffInterfaces(aclOldEgInterfaces, aclNewEgInterfaces) if len(removedInInterfaces) > 0 { - err = plugin.vppCalls.RemoveIPIngressACLFromInterfaces(vppACLIndex, removedInInterfaces) + err = plugin.aclHandler.RemoveIPIngressACLFromInterfaces(vppACLIndex, removedInInterfaces) if err != nil { return err } } if len(removedEgInterfaces) > 0 { - err = plugin.vppCalls.RemoveIPEgressACLFromInterfaces(vppACLIndex, removedEgInterfaces) + err = plugin.aclHandler.RemoveIPEgressACLFromInterfaces(vppACLIndex, removedEgInterfaces) if err != nil { return err } } if len(addedInInterfaces) > 0 { - err = plugin.vppCalls.SetACLToInterfacesAsIngress(vppACLIndex, addedInInterfaces) + err = plugin.aclHandler.SetACLToInterfacesAsIngress(vppACLIndex, addedInInterfaces) if err != nil { return err } } if len(addedEgInterfaces) > 0 { - err = plugin.vppCalls.SetACLToInterfacesAsEgress(vppACLIndex, addedEgInterfaces) + err = plugin.aclHandler.SetACLToInterfacesAsEgress(vppACLIndex, addedEgInterfaces) if err != nil { return err } @@ -314,13 +311,13 @@ func (plugin *ACLConfigurator) DeleteACL(acl *acl.AccessLists_Acl) (err error) { // Remove interfaces from L2 ACL. vppACLIndex := agentL2AclIndex - 1 if acl.Interfaces != nil { - err := plugin.vppCalls.RemoveMacIPIngressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(acl.Interfaces.Ingress)) + err := plugin.aclHandler.RemoveMacIPIngressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(acl.Interfaces.Ingress)) if err != nil { return err } } // Remove ACL L2. - err := vppcalls.DeleteMacIPAcl(vppACLIndex, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.aclHandler.DeleteMacIPAcl(vppACLIndex) if err != nil { return err } @@ -331,18 +328,18 @@ func (plugin *ACLConfigurator) DeleteACL(acl *acl.AccessLists_Acl) (err error) { // Remove interfaces. 
vppACLIndex := agentL3L4AclIndex - 1 if acl.Interfaces != nil { - err = plugin.vppCalls.RemoveIPIngressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(acl.Interfaces.Ingress)) + err = plugin.aclHandler.RemoveIPIngressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(acl.Interfaces.Ingress)) if err != nil { return err } - err = plugin.vppCalls.RemoveIPEgressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(acl.Interfaces.Egress)) + err = plugin.aclHandler.RemoveIPEgressACLFromInterfaces(vppACLIndex, plugin.getInterfaces(acl.Interfaces.Egress)) if err != nil { return err } } // Remove ACL L3/L4. - err := vppcalls.DeleteIPAcl(vppACLIndex, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.aclHandler.DeleteIPAcl(vppACLIndex) if err != nil { return err } @@ -355,7 +352,7 @@ func (plugin *ACLConfigurator) DeleteACL(acl *acl.AccessLists_Acl) (err error) { // DumpIPACL returns all configured IP ACLs in proto format func (plugin *ACLConfigurator) DumpIPACL() (acls []*acl.AccessLists_Acl, err error) { - aclsWithIndex, err := vppdump.DumpIPACL(plugin.ifIndexes, plugin.log, plugin.vppDumpChan, plugin.stopwatch) + aclsWithIndex, err := plugin.aclHandler.DumpIPACL(plugin.ifIndexes) if err != nil { plugin.log.Error(err) return nil, err @@ -368,7 +365,7 @@ func (plugin *ACLConfigurator) DumpIPACL() (acls []*acl.AccessLists_Acl, err err // DumpMACIPACL returns all configured MACIP ACLs in proto format func (plugin *ACLConfigurator) DumpMACIPACL() (acls []*acl.AccessLists_Acl, err error) { - aclsWithIndex, err := vppdump.DumpMACIPACL(plugin.ifIndexes, plugin.log, plugin.vppDumpChan, plugin.stopwatch) + aclsWithIndex, err := plugin.aclHandler.DumpMACIPACL(plugin.ifIndexes) if err != nil { plugin.log.Error(err) return nil, err @@ -421,17 +418,17 @@ func (plugin *ACLConfigurator) ResolveCreatedInterface(ifName string, ifIdx uint var ifIndices []uint32 switch aclCacheEntry.ifAttr { case L2: - if err := plugin.vppCalls.SetMacIPAclToInterface(aclCacheEntry.aclID, append(ifIndices, ifIdx)); err != nil { + if err := plugin.aclHandler.SetMacIPAclToInterface(aclCacheEntry.aclID, append(ifIndices, ifIdx)); err != nil { plugin.log.Error(err) wasErr = err } case INGRESS: - if err := plugin.vppCalls.SetACLToInterfacesAsIngress(aclCacheEntry.aclID, append(ifIndices, ifIdx)); err != nil { + if err := plugin.aclHandler.SetACLToInterfacesAsIngress(aclCacheEntry.aclID, append(ifIndices, ifIdx)); err != nil { plugin.log.Error(err) wasErr = err } case EGRESS: - if err := plugin.vppCalls.SetACLToInterfacesAsEgress(aclCacheEntry.aclID, append(ifIndices, ifIdx)); err != nil { + if err := plugin.aclHandler.SetACLToInterfacesAsEgress(aclCacheEntry.aclID, append(ifIndices, ifIdx)); err != nil { plugin.log.Error(err) wasErr = err } diff --git a/plugins/vpp/aclplugin/acl_config_test.go b/plugins/vpp/aclplugin/acl_config_test.go index cd716b5cf2..f45187fa77 100644 --- a/plugins/vpp/aclplugin/acl_config_test.go +++ b/plugins/vpp/aclplugin/acl_config_test.go @@ -87,7 +87,7 @@ func TestAclConfiguratorInit(t *testing.T) { ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{}) // Test init - err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, false) + err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, nil) Expect(err).To(BeNil()) err = plugin.Close() Expect(err).To(BeNil()) @@ -446,7 +446,7 @@ func aclTestSetup(t *testing.T, createIfs bool) (*vppcallmock.TestCtx, *core.Con // Configurator 
ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{1, 0}) plugin := &aclplugin.ACLConfigurator{} - err = plugin.Init(log, connection, ifIndexes, false) + err = plugin.Init(log, connection, ifIndexes, nil) Expect(err).To(BeNil()) return ctx, connection, plugin diff --git a/plugins/vpp/aclplugin/data_resync.go b/plugins/vpp/aclplugin/data_resync.go index 717c3e1f59..1d11d813b5 100644 --- a/plugins/vpp/aclplugin/data_resync.go +++ b/plugins/vpp/aclplugin/data_resync.go @@ -15,8 +15,9 @@ package aclplugin import ( - "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppdump" + "time" + + acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) @@ -24,22 +25,20 @@ import ( func (plugin *ACLConfigurator) Resync(nbACLs []*acl.AccessLists_Acl) error { plugin.log.Debug("Resync ACLs started") // Calculate and log acl resync. - defer func() { - if plugin.stopwatch != nil { - plugin.stopwatch.PrintLog() - } - }() + defer func(t time.Time) { + plugin.stopwatch.TimeLog(acl_api.MacipACLDel{}).LogTimeEntry(time.Since(t)) + }(time.Now()) // Re-initialize cache plugin.clearMapping() // Retrieve existing IpACL config - vppIpACLs, err := vppdump.DumpIPACL(plugin.ifIndexes, plugin.log, plugin.vppChan, plugin.stopwatch) + vppIpACLs, err := plugin.aclHandler.DumpIPACL(plugin.ifIndexes) if err != nil { return err } // Retrieve existing MacIpACL config - vppMacIpACLs, err := vppdump.DumpMACIPACL(plugin.ifIndexes, plugin.log, plugin.vppChan, plugin.stopwatch) + vppMacIpACLs, err := plugin.aclHandler.DumpMACIPACL(plugin.ifIndexes) if err != nil { return err } @@ -54,7 +53,7 @@ func (plugin *ACLConfigurator) Resync(nbACLs []*acl.AccessLists_Acl) error { ipRulesExist := len(vppIpACL.ACLDetails.Rules) > 0 && vppIpACL.ACLDetails.Rules[0].GetMatch().GetIpRule() != nil if ipRulesExist { - if err := vppcalls.DeleteIPAcl(vppIpACL.Identifier.ACLIndex, plugin.log, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.aclHandler.DeleteIPAcl(vppIpACL.Identifier.ACLIndex); err != nil { plugin.log.Error(err) return err } @@ -67,7 +66,7 @@ func (plugin *ACLConfigurator) Resync(nbACLs []*acl.AccessLists_Acl) error { ipRulesExist := len(vppMacIpACL.ACLDetails.Rules) > 0 && vppMacIpACL.ACLDetails.Rules[0].GetMatch().GetMacipRule() != nil if ipRulesExist { - if err := vppcalls.DeleteMacIPAcl(vppMacIpACL.Identifier.ACLIndex, plugin.log, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.aclHandler.DeleteMacIPAcl(vppMacIpACL.Identifier.ACLIndex); err != nil { plugin.log.Error(err) return err } diff --git a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go index 7a1ce4e962..0b247edb2d 100644 --- a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go @@ -22,9 +22,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" - "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppdump" acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) @@ -55,31 +52,28 @@ var AclMessages = []govppapi.Message{ &acl_api.MacipACLInterfaceAddDelReply{}, } -// GetAclPluginVersion returns version of the VPP ACL plugin -func GetAclPluginVersion(vppChannel vppdump.VPPChannel, stopwatch *measure.Stopwatch) (string, error) { +func (handler *aclVppHandler) GetAclPluginVersion() 
(string, error) { defer func(t time.Time) { - stopwatch.TimeLog(acl_api.ACLPluginGetVersion{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(acl_api.ACLPluginGetVersion{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &acl_api.ACLPluginGetVersion{} reply := &acl_api.ACLPluginGetVersionReply{} // Does not return retval - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return "", fmt.Errorf("failed to get VPP ACL plugin version: %v", err) } return strconv.Itoa(int(reply.Major)) + "." + strconv.Itoa(int(reply.Minor)), nil } -// AddIPAcl create new L3/4 ACL. Input index == 0xffffffff, VPP provides index in reply. -func AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string, log logging.Logger, - vppChannel vppdump.VPPChannel, stopwatch *measure.Stopwatch) (uint32, error) { +func (handler *aclVppHandler) AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error) { defer func(t time.Time) { - stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare Ip rules - aclIPRules, err := transformACLIpRules(rules) + aclIPRules, err := handler.transformACLIpRules(rules) if err != nil { return 0, err } @@ -95,32 +89,27 @@ func AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string, log logging.Log } reply := &acl_api.ACLAddReplaceReply{} - if err = vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, fmt.Errorf("failed to write ACL %v: %v", aclName, err) } if reply.Retval != 0 { return 0, fmt.Errorf("%s returned %v while writing ACL %v to VPP", reply.GetMessageName(), reply.Retval, aclName) } - log.Infof("%v Ip ACL rule(s) written for ACL %v with index %v", len(aclIPRules), aclName, reply.ACLIndex) - return reply.ACLIndex, nil } -// AddMacIPAcl creates new L2 MAC IP ACL. VPP provides index in reply. -func AddMacIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string, log logging.Logger, - vppChannel vppdump.VPPChannel, stopwatch *measure.Stopwatch) (uint32, error) { +func (handler *aclVppHandler) AddMacIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error) { defer func(t time.Time) { - stopwatch.TimeLog(acl_api.MacipACLAdd{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(acl_api.MacipACLAdd{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare MAc Ip rules - aclMacIPRules, err := transformACLMacIPRules(rules) + aclMacIPRules, err := handler.transformACLMacIPRules(rules) if err != nil { return 0, err } if len(aclMacIPRules) == 0 { - log.Debugf("No Mac Ip ACL rules written for ACL %v", aclName) return 0, fmt.Errorf("no rules found for ACL %v", aclName) } @@ -131,33 +120,27 @@ func AddMacIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string, log logging. 
} reply := &acl_api.MacipACLAddReply{} - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, fmt.Errorf("failed to write ACL %v: %v", aclName, err) } if reply.Retval != 0 { return 0, fmt.Errorf("%s returned %v while writing ACL %v to VPP", reply.GetMessageName(), reply.Retval, aclName) } - log.Infof("%v Mac Ip ACL rule(s) written for ACL %v with index %v", len(aclMacIPRules), aclName, reply.ACLIndex) - return reply.ACLIndex, nil } -// ModifyIPAcl uses index (provided by VPP) to identify ACL which is modified. -func ModifyIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string, log logging.Logger, - vppChannel vppdump.VPPChannel, stopwatch *measure.Stopwatch) error { - +func (handler *aclVppHandler) ModifyIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string) error { defer func(t time.Time) { - stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare Ip rules - aclIPRules, err := transformACLIpRules(rules) + aclIPRules, err := handler.transformACLIpRules(rules) if err != nil { return err } if len(aclIPRules) == 0 { - log.Debugf("No Ip ACL rules written for ACL %v", aclName) return nil } @@ -169,28 +152,24 @@ func ModifyIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName str } reply := &acl_api.ACLAddReplaceReply{} - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to write ACL %v: %v", aclName, err) } if reply.Retval != 0 { return fmt.Errorf("%s returned %v while writing ACL %v to VPP", reply.GetMessageName(), reply.Retval, aclName) } - log.Infof("%v Ip ACL rule(s) written for ACL %v with index %v", len(aclIPRules), aclName, aclIndex) - return nil } -// ModifyMACIPAcl uses index (provided by VPP) to identify ACL which is modified. -func ModifyMACIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string, log logging.Logger, - vppChannel vppdump.VPPChannel, stopwatch *measure.Stopwatch) error { +func (handler *aclVppHandler) ModifyMACIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string) error { defer func(t time.Time) { - stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare MAc Ip rules - aclMacIPRules, err := transformACLMacIPRules(rules) + aclMacIPRules, err := handler.transformACLMacIPRules(rules) if err != nil { return err } @@ -206,22 +185,19 @@ func ModifyMACIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName } reply := &acl_api.MacipACLAddReplaceReply{} - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to write ACL %v: %v", aclName, err) } if reply.Retval != 0 { return fmt.Errorf("%s returned %v while writing ACL %v to VPP", reply.GetMessageName(), reply.Retval, aclName) } - log.Infof("%v Ip ACL rule(s) written for ACL %v with index %v", len(aclMacIPRules), aclName, aclIndex) - return nil } -// DeleteIPAcl removes L3/L4 ACL. 
-func DeleteIPAcl(aclIndex uint32, log logging.Logger, vppChannel vppdump.VPPChannel, stopwatch *measure.Stopwatch) error { +func (handler *aclVppHandler) DeleteIPAcl(aclIndex uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(acl_api.ACLDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(acl_api.ACLDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) msg := &acl_api.ACLDel{ @@ -229,22 +205,19 @@ func DeleteIPAcl(aclIndex uint32, log logging.Logger, vppChannel vppdump.VPPChan } reply := &acl_api.ACLDelReply{} - if err := vppChannel.SendRequest(msg).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(msg).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to remove L3/L4 ACL %v: %v", aclIndex, err) } if reply.Retval != 0 { return fmt.Errorf("%s returned %v while removing L3/L4 ACL %v", reply.GetMessageName(), reply.Retval, aclIndex) } - log.Infof("L3/L4 ACL %v removed", aclIndex) - return nil } -// DeleteMacIPAcl removes L2 ACL. -func DeleteMacIPAcl(aclIndex uint32, log logging.Logger, vppChannel vppdump.VPPChannel, stopwatch *measure.Stopwatch) error { +func (handler *aclVppHandler) DeleteMacIPAcl(aclIndex uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(acl_api.MacipACLDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(acl_api.MacipACLDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) msg := &acl_api.MacipACLDel{ @@ -252,20 +225,18 @@ func DeleteMacIPAcl(aclIndex uint32, log logging.Logger, vppChannel vppdump.VPPC } reply := &acl_api.MacipACLDelReply{} - if err := vppChannel.SendRequest(msg).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(msg).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to remove L2 ACL %v: %v", aclIndex, err) } if reply.Retval != 0 { return fmt.Errorf("%s returned %v while removing L2 ACL %v", reply.GetMessageName(), reply.Retval, aclIndex) } - log.Infof("L2 ACL %v removed", aclIndex) - return nil } // Method transforms provided set of IP proto ACL rules to binapi ACL rules. 
-func transformACLIpRules(rules []*acl.AccessLists_Acl_Rule) (aclIPRules []acl_api.ACLRule, err error) { +func (handler *aclVppHandler) transformACLIpRules(rules []*acl.AccessLists_Acl_Rule) (aclIPRules []acl_api.ACLRule, err error) { for _, rule := range rules { aclRule := &acl_api.ACLRule{ IsPermit: uint8(rule.AclAction), @@ -275,18 +246,18 @@ func transformACLIpRules(rules []*acl.AccessLists_Acl_Rule) (aclIPRules []acl_ap // Concerned to IP rules only // L3 if ipRule.Ip != nil { - aclRule, err = ipACL(ipRule.Ip, aclRule) + aclRule, err = handler.ipACL(ipRule.Ip, aclRule) if err != nil { return nil, err } } // ICMP/L4 if ipRule.Icmp != nil { - aclRule = icmpACL(ipRule.Icmp, aclRule) + aclRule = handler.icmpACL(ipRule.Icmp, aclRule) } else if ipRule.Tcp != nil { - aclRule = tcpACL(ipRule.Tcp, aclRule) + aclRule = handler.tcpACL(ipRule.Tcp, aclRule) } else if ipRule.Udp != nil { - aclRule = udpACL(ipRule.Udp, aclRule) + aclRule = handler.udpACL(ipRule.Udp, aclRule) } aclIPRules = append(aclIPRules, *aclRule) } @@ -294,7 +265,7 @@ func transformACLIpRules(rules []*acl.AccessLists_Acl_Rule) (aclIPRules []acl_ap return aclIPRules, nil } -func transformACLMacIPRules(rules []*acl.AccessLists_Acl_Rule) (aclMacIPRules []acl_api.MacipACLRule, err error) { +func (handler *aclVppHandler) transformACLMacIPRules(rules []*acl.AccessLists_Acl_Rule) (aclMacIPRules []acl_api.MacipACLRule, err error) { for _, rule := range rules { aclMacIPRule := &acl_api.MacipACLRule{ IsPermit: uint8(rule.AclAction), @@ -334,7 +305,7 @@ func transformACLMacIPRules(rules []*acl.AccessLists_Acl_Rule) (aclMacIPRules [] // The function sets an IP ACL rule fields into provided ACL Rule object. Source // and destination addresses have to be the same IP version and contain a network mask. -func ipACL(ipRule *acl.AccessLists_Acl_Rule_Match_IpRule_Ip, aclRule *acl_api.ACLRule) (*acl_api.ACLRule, error) { +func (handler *aclVppHandler) ipACL(ipRule *acl.AccessLists_Acl_Rule_Match_IpRule_Ip, aclRule *acl_api.ACLRule) (*acl_api.ACLRule, error) { var ( err error srcIP net.IP @@ -401,12 +372,12 @@ func ipACL(ipRule *acl.AccessLists_Acl_Rule_Match_IpRule_Ip, aclRule *acl_api.AC // The function sets an ICMP ACL rule fields into provided ACL Rule object. // The ranges are exclusive, use first = 0 and last = 255/65535 (icmpv4/icmpv6) to match "any". -func icmpACL(icmpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { +func (handler *aclVppHandler) icmpACL(icmpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { if icmpRule == nil { return aclRule } if icmpRule.Icmpv6 { - aclRule.Proto = vppdump.ICMPv6Proto // IANA ICMPv6 + aclRule.Proto = ICMPv6Proto // IANA ICMPv6 aclRule.IsIpv6 = 1 // ICMPv6 type range aclRule.SrcportOrIcmptypeFirst = uint16(icmpRule.IcmpTypeRange.First) @@ -415,7 +386,7 @@ func icmpACL(icmpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp, aclRule *acl_ aclRule.DstportOrIcmpcodeFirst = uint16(icmpRule.IcmpCodeRange.First) aclRule.DstportOrIcmpcodeLast = uint16(icmpRule.IcmpCodeRange.First) } else { - aclRule.Proto = vppdump.ICMPv4Proto // IANA ICMPv4 + aclRule.Proto = ICMPv4Proto // IANA ICMPv4 aclRule.IsIpv6 = 0 // ICMPv4 type range aclRule.SrcportOrIcmptypeFirst = uint16(icmpRule.IcmpTypeRange.First) @@ -428,8 +399,8 @@ func icmpACL(icmpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp, aclRule *acl_ } // Sets an TCP ACL rule fields into provided ACL Rule object. 
-func tcpACL(tcpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Tcp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { - aclRule.Proto = vppdump.TCPProto // IANA TCP +func (handler *aclVppHandler) tcpACL(tcpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Tcp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { + aclRule.Proto = TCPProto // IANA TCP aclRule.SrcportOrIcmptypeFirst = uint16(tcpRule.SourcePortRange.LowerPort) aclRule.SrcportOrIcmptypeLast = uint16(tcpRule.SourcePortRange.UpperPort) aclRule.DstportOrIcmpcodeFirst = uint16(tcpRule.DestinationPortRange.LowerPort) @@ -440,8 +411,8 @@ func tcpACL(tcpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Tcp, aclRule *acl_api } // Sets an UDP ACL rule fields into provided ACL Rule object. -func udpACL(udpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Udp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { - aclRule.Proto = vppdump.UDPProto // IANA UDP +func (handler *aclVppHandler) udpACL(udpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Udp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { + aclRule.Proto = UDPProto // IANA UDP aclRule.SrcportOrIcmptypeFirst = uint16(udpRule.SourcePortRange.LowerPort) aclRule.SrcportOrIcmptypeLast = uint16(udpRule.SourcePortRange.UpperPort) aclRule.DstportOrIcmpcodeFirst = uint16(udpRule.DestinationPortRange.LowerPort) diff --git a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go index 3686f9445f..58ff58bf45 100644 --- a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go +++ b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go @@ -17,7 +17,6 @@ package vppcalls import ( "testing" - "github.com/ligato/cn-infra/logging/logrus" acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" "github.com/ligato/vpp-agent/tests/vppcallmock" @@ -258,28 +257,30 @@ func TestAddIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - aclIndex, err := AddIPAcl(acl_IPrules, "test0", logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + aclIndex, err := aclHandler.AddIPAcl(acl_IPrules, "test0") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(0)) - _, err = AddIPAcl(aclNoRules, "test1", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddIPAcl(aclNoRules, "test1") Expect(err).To(Not(BeNil())) - _, err = AddIPAcl(aclErr1Rules, "test2", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddIPAcl(aclErr1Rules, "test2") Expect(err).To(Not(BeNil())) - _, err = AddIPAcl(aclErr2Rules, "test3", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddIPAcl(aclErr2Rules, "test3") Expect(err).To(Not(BeNil())) - _, err = AddIPAcl(aclErr3Rules, "test4", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddIPAcl(aclErr3Rules, "test4") Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - _, err = AddIPAcl(acl_IPrules, "test5", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddIPAcl(acl_IPrules, "test5") Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{Retval: -1}) - _, err = AddIPAcl(acl_IPrules, "test6", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddIPAcl(acl_IPrules, "test6") Expect(err).To(Not(BeNil())) } @@ -289,29 +290,31 @@ func TestAddMacIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - aclIndex, err := 
AddMacIPAcl(acl_MACIPrules, "test6", logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + aclIndex, err := aclHandler.AddMacIPAcl(acl_MACIPrules, "test6") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(0)) - _, err = AddMacIPAcl(aclNoRules, "test7", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddMacIPAcl(aclNoRules, "test7") Expect(err).To(Not(BeNil())) - _, err = AddMacIPAcl(aclErr4Rules, "test8", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddMacIPAcl(aclErr4Rules, "test8") Expect(err).To(Not(BeNil())) - _, err = AddMacIPAcl(aclErr5Rules, "test9", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddMacIPAcl(aclErr5Rules, "test9") Expect(err).To(Not(BeNil())) - _, err = AddMacIPAcl(aclErr6Rules, "test10", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddMacIPAcl(aclErr6Rules, "test10") Expect(err).To(Not(BeNil())) Expect(err.Error()).To(BeEquivalentTo("invalid IP address ")) ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - _, err = AddMacIPAcl(acl_MACIPrules, "test11", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddMacIPAcl(acl_MACIPrules, "test11") Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{Retval: -1}) - _, err = AddMacIPAcl(acl_MACIPrules, "test12", logrus.DefaultLogger(), ctx.MockChannel, nil) + _, err = aclHandler.AddMacIPAcl(acl_MACIPrules, "test12") Expect(err).To(Not(BeNil())) } @@ -321,7 +324,9 @@ func TestDeleteIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - aclIndex, err := AddIPAcl(acl_IPrules, "test_del0", logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + aclIndex, err := aclHandler.AddIPAcl(acl_IPrules, "test_del0") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(0)) @@ -341,20 +346,20 @@ func TestDeleteIPAcl(t *testing.T) { } ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{ACLIndex: 1}) - aclIndex, err = AddIPAcl(rule2del, "test_del1", logrus.DefaultLogger(), ctx.MockChannel, nil) + aclIndex, err = aclHandler.AddIPAcl(rule2del, "test_del1") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(1)) ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - err = DeleteIPAcl(5, logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.DeleteIPAcl(5) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.ACLDelReply{Retval: -1}) - err = DeleteIPAcl(5, logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.DeleteIPAcl(5) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.ACLDelReply{}) - err = DeleteIPAcl(1, logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.DeleteIPAcl(1) Expect(err).To(BeNil()) } @@ -364,7 +369,9 @@ func TestDeleteMACIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - aclIndex, err := AddMacIPAcl(acl_MACIPrules, "test_del2", logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + aclIndex, err := aclHandler.AddMacIPAcl(acl_MACIPrules, "test_del2") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(0)) @@ -384,20 +391,20 @@ func TestDeleteMACIPAcl(t *testing.T) { } ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{ACLIndex: 1}) - aclIndex, err = AddMacIPAcl(rule2del, "test_del3", logrus.DefaultLogger(), ctx.MockChannel, nil) 
+ aclIndex, err = aclHandler.AddMacIPAcl(rule2del, "test_del3") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(1)) ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - err = DeleteMacIPAcl(5, logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.DeleteMacIPAcl(5) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.MacipACLDelReply{Retval: -1}) - err = DeleteMacIPAcl(5, logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.DeleteMacIPAcl(5) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.MacipACLDelReply{}) - err = DeleteMacIPAcl(1, logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.DeleteMacIPAcl(1) Expect(err).To(BeNil()) } @@ -407,7 +414,9 @@ func TestModifyIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - aclIndex, err := AddIPAcl(acl_IPrules, "test_modify", logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + aclIndex, err := aclHandler.AddIPAcl(acl_IPrules, "test_modify") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(0)) @@ -439,21 +448,21 @@ func TestModifyIPAcl(t *testing.T) { } ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - err = ModifyIPAcl(0, rule2modify, "test_modify0", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyIPAcl(0, rule2modify, "test_modify0") Expect(err).To(BeNil()) - err = ModifyIPAcl(0, aclErr1Rules, "test_modify1", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyIPAcl(0, aclErr1Rules, "test_modify1") Expect(err).To(Not(BeNil())) - err = ModifyIPAcl(0, aclNoRules, "test_modify2", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyIPAcl(0, aclNoRules, "test_modify2") Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{}) - err = ModifyIPAcl(0, acl_IPrules, "test_modify3", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyIPAcl(0, acl_IPrules, "test_modify3") Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{Retval: -1}) - err = ModifyIPAcl(0, acl_IPrules, "test_modify4", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyIPAcl(0, acl_IPrules, "test_modify4") Expect(err).To(Not(BeNil())) } @@ -463,7 +472,9 @@ func TestModifyMACIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - aclIndex, err := AddMacIPAcl(acl_MACIPrules, "test_modify", logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + aclIndex, err := aclHandler.AddMacIPAcl(acl_MACIPrules, "test_modify") Expect(err).To(BeNil()) Expect(aclIndex).To(BeEquivalentTo(0)) @@ -495,17 +506,17 @@ func TestModifyMACIPAcl(t *testing.T) { } ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{}) - err = ModifyMACIPAcl(0, rule2modify, "test_modify0", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyMACIPAcl(0, rule2modify, "test_modify0") Expect(err).To(BeNil()) - err = ModifyMACIPAcl(0, aclErr1Rules, "test_modify1", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyMACIPAcl(0, aclErr1Rules, "test_modify1") Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{}) - err = ModifyMACIPAcl(0, acl_IPrules, "test_modify3", logrus.DefaultLogger(), ctx.MockChannel, nil) + err = aclHandler.ModifyMACIPAcl(0, acl_IPrules, "test_modify3") Expect(err).To(Not(BeNil())) 
 	ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{Retval: -1})
-	err = ModifyMACIPAcl(0, acl_IPrules, "test_modify4", logrus.DefaultLogger(), ctx.MockChannel, nil)
+	err = aclHandler.ModifyMACIPAcl(0, acl_IPrules, "test_modify4")
 	Expect(err).To(Not(BeNil()))
 }
diff --git a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go
new file mode 100644
index 0000000000..1c197c36a8
--- /dev/null
+++ b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vppcalls
+
+import (
+	"github.com/ligato/cn-infra/logging/measure"
+	acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl"
+	"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx"
+	"github.com/ligato/vpp-agent/plugins/vpp/model/acl"
+)
+
+// AclVppAPI provides methods required to handle VPP access lists
+type AclVppAPI interface {
+	// GetAclPluginVersion returns version of the VPP ACL plugin
+	GetAclPluginVersion() (string, error)
+	// AddIPAcl creates a new L3/4 ACL. Input index == 0xffffffff, VPP provides index in reply.
+	AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error)
+	// AddMacIPAcl creates new L2 MAC IP ACL. VPP provides index in reply.
+	AddMacIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error)
+	// ModifyIPAcl uses index (provided by VPP) to identify ACL which is modified.
+	ModifyIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string) error
+	// ModifyMACIPAcl uses index (provided by VPP) to identify ACL which is modified.
+	ModifyMACIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string) error
+	// DeleteIPAcl removes L3/L4 ACL.
+	DeleteIPAcl(aclIndex uint32) error
+	// DeleteMacIPAcl removes L2 ACL.
+	DeleteMacIPAcl(aclIndex uint32) error
+	// SetACLToInterfacesAsIngress sets ACL to all provided interfaces as ingress
+	SetACLToInterfacesAsIngress(ACLIndex uint32, ifIndices []uint32) error
+	// RemoveIPIngressACLFromInterfaces removes ACL from interfaces
+	RemoveIPIngressACLFromInterfaces(ACLIndex uint32, ifIndices []uint32) error
+	// SetACLToInterfacesAsEgress sets ACL to all provided interfaces as egress
+	SetACLToInterfacesAsEgress(ACLIndex uint32, ifIndices []uint32) error
+	// RemoveIPEgressACLFromInterfaces removes ACL from interfaces
+	RemoveIPEgressACLFromInterfaces(ACLIndex uint32, ifIndices []uint32) error
+	// SetMacIPAclToInterface adds L2 ACL to interface.
+	SetMacIPAclToInterface(aclIndex uint32, ifIndices []uint32) error
+	// RemoveMacIPIngressACLFromInterfaces removes L2 ACL from interfaces.
+	RemoveMacIPIngressACLFromInterfaces(removedACLIndex uint32, ifIndices []uint32) error
+	// DumpIPACL returns all IP-type ACLs
+	DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error)
+	// DumpMACIPACL returns all MACIP-type ACLs
+	DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error)
+	// DumpIPACLInterfaces returns a map of IP ACL indices with interfaces
+	DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex) (map[uint32]*acl.AccessLists_Acl_Interfaces, error)
+	// DumpMACIPACLInterfaces returns a map of MACIP ACL indices with interfaces
+	DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex) (map[uint32]*acl.AccessLists_Acl_Interfaces, error)
+	// DumpIPAcls returns a list of all configured ACLs with IP-type ruleData.
+	DumpIPAcls() (map[ACLIdentifier][]acl_api.ACLRule, error)
+	// DumpMacIPAcls returns a list of all configured ACLs with MACIP-type ruleData.
+	DumpMacIPAcls() (map[ACLIdentifier][]acl_api.MacipACLRule, error)
+	// DumpInterfaceIPAcls finds interface in VPP and returns its IP ACL configuration
+	DumpInterfaceIPAcls(swIndex uint32) (acl.AccessLists, error)
+	// DumpInterfaceMACIPAcls finds interface in VPP and returns its MACIP ACL configuration
+	DumpInterfaceMACIPAcls(swIndex uint32) (acl.AccessLists, error)
+	// DumpInterfaceIPACLs finds interface in VPP and returns its IP ACL configuration.
+	DumpInterfaceIPACLs(swIndex uint32) (*acl_api.ACLInterfaceListDetails, error)
+	// DumpInterfaceMACIPACLs finds interface in VPP and returns its MACIP ACL configuration.
+	DumpInterfaceMACIPACLs(swIndex uint32) (*acl_api.MacipACLInterfaceListDetails, error)
+	// DumpInterfaces finds all interfaces in VPP and returns their ACL configurations
+	DumpInterfaces() ([]*acl_api.ACLInterfaceListDetails, []*acl_api.MacipACLInterfaceListDetails, error)
+}
+
+// aclVppHandler is an accessor for ACL-related vppcalls methods
+type aclVppHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel VPPChannel
+	dumpChannel  VPPChannel
+}
+
+// NewAclVppHandler creates a new instance of the ACL vppcalls handler
+func NewAclVppHandler(callsChan, dumpChan VPPChannel, stopwatch *measure.Stopwatch) *aclVppHandler {
+	return &aclVppHandler{
+		callsChannel: callsChan,
+		dumpChannel:  dumpChan,
+		stopwatch:    stopwatch,
+	}
+}
diff --git a/plugins/vpp/aclplugin/vppcalls/doc.go b/plugins/vpp/aclplugin/vppcalls/doc.go
index 508aa3c68b..374748917b 100644
--- a/plugins/vpp/aclplugin/vppcalls/doc.go
+++ b/plugins/vpp/aclplugin/vppcalls/doc.go
@@ -1,2 +1,3 @@
-// Package vppcalls contains wrappers over VPP ACL binary APIs.
+// Package vppcalls contains wrappers over VPP ACL binary APIs and helpers to dump ACLs configured in VPP - per
+// interface and total.
 package vppcalls
diff --git a/plugins/vpp/aclplugin/vppdump/dump_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go
similarity index 68%
rename from plugins/vpp/aclplugin/vppdump/dump_vppcalls.go
rename to plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go
index 6a5299f80e..ac42ab0a93 100644
--- a/plugins/vpp/aclplugin/vppdump/dump_vppcalls.go
+++ b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package vppdump +package vppcalls import ( "bytes" @@ -20,9 +20,7 @@ import ( "net" "time" - "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" @@ -57,13 +55,12 @@ type ACLToInterface struct { EgressACL []uint32 } -func DumpIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VPPChannel, - stopwatch *measure.Stopwatch) ([]*ACLEntry, error) { +func (handler *aclVppHandler) DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error) { ruleIPData := make(map[ACLIdentifier][]*acl.AccessLists_Acl_Rule) // get all ACLs with IP ruleData - IPRuleACLs, err := DumpIPAcls(log, vppChannel, stopwatch) + IPRuleACLs, err := handler.DumpIPAcls() if len(IPRuleACLs) < 1 || err != nil { return nil, err } @@ -76,11 +73,9 @@ func DumpIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VP if len(IPRules) > 0 { for _, IPRule := range IPRules { - ruleDetails, err := getIPRuleDetails(IPRule) + ruleDetails, err := handler.getIPRuleDetails(IPRule) if err != nil { - log.Error(err) - wasErr = err - break + return nil, fmt.Errorf("failed to get IP Rule %v details: %v", IPRule, err) } rulesDetails = append(rulesDetails, ruleDetails) } @@ -95,7 +90,7 @@ func DumpIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VP } // Get all ACL indices with ingress and egress interfaces - interfaceData, err := DumpIPACLInterfaces(indices, swIfIndices, log, vppChannel, stopwatch) + interfaceData, err := handler.DumpIPACLInterfaces(indices, swIfIndices) if err != nil { return nil, err } @@ -103,7 +98,6 @@ func DumpIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VP var ACLs []*ACLEntry // Build a list of ACL ruleData with ruleData, interfaces, index and tag (name) for identifier, rules := range ruleIPData { - log.Info("acl index : %v", identifier.ACLIndex) ACLs = append(ACLs, &ACLEntry{ Identifier: &ACLIdentifier{ ACLIndex: identifier.ACLIndex, @@ -120,13 +114,12 @@ func DumpIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VP return ACLs, wasErr } -func DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VPPChannel, - stopwatch *measure.Stopwatch) ([]*ACLEntry, error) { +func (handler *aclVppHandler) DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error) { ruleMACIPData := make(map[ACLIdentifier][]*acl.AccessLists_Acl_Rule) // get all ACLs with MACIP ruleData - MACIPRuleACLs, err := DumpMacIPAcls(log, vppChannel, stopwatch) + MACIPRuleACLs, err := handler.DumpMacIPAcls() if len(MACIPRuleACLs) < 1 || err != nil { return nil, err } @@ -138,11 +131,9 @@ func DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel if len(MACIPRules) > 0 { for _, MACIPRule := range MACIPRules { - ruleDetails, err := getMACIPRuleDetails(MACIPRule) + ruleDetails, err := handler.getMACIPRuleDetails(MACIPRule) if err != nil { - log.Error(err) - wasErr = err - break + return nil, fmt.Errorf("failed to get MACIP Rule %v details: %v", MACIPRule, err) } rulesDetails = append(rulesDetails, ruleDetails) } @@ -157,7 +148,7 @@ func DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel } // Get all ACL indices with ingress and egress interfaces - interfaceData, err := DumpMACIPACLInterfaces(indices, swIfIndices, log, vppChannel, stopwatch) + interfaceData, 
err := handler.DumpMACIPACLInterfaces(indices, swIfIndices) if err != nil { return nil, err } @@ -180,11 +171,9 @@ func DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel return ACLs, wasErr } -// DumpACLInterfaces returns a map of IP ACL indices with interfaces -func DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VPPChannel, - stopwatch *measure.Stopwatch) (map[uint32]*acl.AccessLists_Acl_Interfaces, error) { +func (handler *aclVppHandler) DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex) (map[uint32]*acl.AccessLists_Acl_Interfaces, error) { defer func(start time.Time) { - stopwatch.TimeLog(&acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(start)) + handler.stopwatch.TimeLog(&acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(start)) }(time.Now()) // list of ACL-to-interfaces @@ -199,7 +188,7 @@ func DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, log l msgIP := &acl_api.ACLInterfaceListDump{ SwIfIndex: 0xffffffff, // dump all } - reqIP := vppChannel.SendMultiRequest(msgIP) + reqIP := handler.dumpChannel.SendMultiRequest(msgIP) for { replyIP := &acl_api.ACLInterfaceListDetails{} stop, err := reqIP.ReceiveReply(replyIP) @@ -207,9 +196,7 @@ func DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, log l break } if err != nil { - log.Error(err) - wasErr = err - break + return aclsWithInterfaces, fmt.Errorf("ACL interface list dump reply error: %v", err) } if replyIP.Count > 0 { @@ -237,7 +224,6 @@ func DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, log l if ingressACLIdx == aclIdx { name, _, found := swIfIndices.LookupName(data.SwIfIdx) if !found { - log.Warnf("ACL requires ingress interface with Idx %v which was not found in the mapping", data.SwIfIdx) continue } ingress = append(ingress, name) @@ -248,7 +234,6 @@ func DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, log l if egressACLIdx == aclIdx { name, _, found := swIfIndices.LookupName(data.SwIfIdx) if !found { - log.Warnf("ACL requires egress interface with Idx %v which was not found in the mapping", data.SwIfIdx) continue } egress = append(egress, name) @@ -265,12 +250,9 @@ func DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, log l return aclsWithInterfaces, wasErr } -// DumpMACIPACLInterfaces returns a map of MACIP ACL indices with interfaces -func DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChannel VPPChannel, - stopwatch *measure.Stopwatch) ( - map[uint32]*acl.AccessLists_Acl_Interfaces, error) { +func (handler *aclVppHandler) DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex) (map[uint32]*acl.AccessLists_Acl_Interfaces, error) { defer func(start time.Time) { - stopwatch.TimeLog(acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(start)) + handler.stopwatch.TimeLog(acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(start)) }(time.Now()) // list of ACL-to-interfaces @@ -285,7 +267,7 @@ func DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, lo msgMACIP := &acl_api.MacipACLInterfaceListDump{ SwIfIndex: 0xffffffff, // dump all } - reqMACIP := vppChannel.SendMultiRequest(msgMACIP) + reqMACIP := handler.dumpChannel.SendMultiRequest(msgMACIP) for { replyMACIP := &acl_api.MacipACLInterfaceListDetails{} stop, err := reqMACIP.ReceiveReply(replyMACIP) @@ -293,9 +275,7 @@ func DumpMACIPACLInterfaces(indices []uint32, 
swIfIndices ifaceidx.SwIfIndex, lo break } if err != nil { - log.Error(err) - wasErr = err - break + return aclsWithInterfaces, fmt.Errorf("MACIP ACL interface list dump reply error: %v", err) } if replyMACIP.Count > 0 { data := &ACLToInterface{ @@ -316,7 +296,6 @@ func DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, lo if ingressACLIdx == aclIdx { name, _, found := swIfIndices.LookupName(data.SwIfIdx) if !found { - log.Warnf("ACL requires ingress interface with Idx %v which was not found in the mapping", data.SwIfIdx) continue } ingress = append(ingress, name) @@ -332,11 +311,9 @@ func DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex, lo return aclsWithInterfaces, wasErr } -// DumpIPAcls returns a list of all configured ACLs with IP-type ruleData. -func DumpIPAcls(log logging.Logger, vch VPPChannel, - stopwatch *measure.Stopwatch) (map[ACLIdentifier][]acl_api.ACLRule, error) { +func (handler *aclVppHandler) DumpIPAcls() (map[ACLIdentifier][]acl_api.ACLRule, error) { defer func(start time.Time) { - stopwatch.TimeLog(acl_api.ACLDump{}).LogTimeEntry(time.Since(start)) + handler.stopwatch.TimeLog(acl_api.ACLDump{}).LogTimeEntry(time.Since(start)) }(time.Now()) aclIPRules := make(map[ACLIdentifier][]acl_api.ACLRule) @@ -344,14 +321,12 @@ func DumpIPAcls(log logging.Logger, vch VPPChannel, req := &acl_api.ACLDump{} req.ACLIndex = 0xffffffff - reqContext := vch.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &acl_api.ACLDetails{} stop, err := reqContext.ReceiveReply(msg) if err != nil { - log.Error(err) - wasErr = err - break + return aclIPRules, fmt.Errorf("ACL dump reply error: %v", err) } if stop { break @@ -368,11 +343,9 @@ func DumpIPAcls(log logging.Logger, vch VPPChannel, return aclIPRules, wasErr } -// DumpMacIPAcls returns a list of all configured ACL with IPMAC-type ruleData. 
-func DumpMacIPAcls(log logging.Logger, vppChannel VPPChannel, - stopwatch *measure.Stopwatch) (map[ACLIdentifier][]acl_api.MacipACLRule, error) { +func (handler *aclVppHandler) DumpMacIPAcls() (map[ACLIdentifier][]acl_api.MacipACLRule, error) { defer func(start time.Time) { - stopwatch.TimeLog(acl_api.MacipACLDump{}).LogTimeEntry(time.Since(start)) + handler.stopwatch.TimeLog(acl_api.MacipACLDump{}).LogTimeEntry(time.Since(start)) }(time.Now()) aclMACIPRules := make(map[ACLIdentifier][]acl_api.MacipACLRule) @@ -380,14 +353,12 @@ func DumpMacIPAcls(log logging.Logger, vppChannel VPPChannel, req := &acl_api.MacipACLDump{} req.ACLIndex = 0xffffffff - reqContext := vppChannel.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &acl_api.MacipACLDetails{} stop, err := reqContext.ReceiveReply(msg) if err != nil { - log.Error(err) - wasErr = err - break + return aclMACIPRules, fmt.Errorf("ACL MACIP dump reply error: %v", err) } if stop { break @@ -403,16 +374,148 @@ func DumpMacIPAcls(log logging.Logger, vppChannel VPPChannel, return aclMACIPRules, wasErr } -func getIPRuleDetails(rule acl_api.ACLRule) (*acl.AccessLists_Acl_Rule, error) { +func (handler *aclVppHandler) DumpInterfaceIPAcls(swIndex uint32) (acl.AccessLists, error) { + allACLs := acl.AccessLists{ + Acls: []*acl.AccessLists_Acl{}, + } + + res, err := handler.DumpInterfaceIPACLs(swIndex) + if err != nil { + return allACLs, err + } + + if res.SwIfIndex != swIndex { + return allACLs, fmt.Errorf("returned interface index %d does not match request", res.SwIfIndex) + } + + for aidx := range res.Acls { + ipACL, err := handler.getIPACLDetails(uint32(aidx)) + if err != nil { + return allACLs, err + } else { + allACLs.Acls = append(allACLs.Acls, ipACL) + } + } + return allACLs, nil +} + +func (handler *aclVppHandler) DumpInterfaceMACIPAcls(swIndex uint32) (acl.AccessLists, error) { + allACLs := acl.AccessLists{ + Acls: []*acl.AccessLists_Acl{}, + } + + resMacIp, err := handler.DumpInterfaceMACIPACLs(swIndex) + if err != nil { + return allACLs, err + } + + if resMacIp.SwIfIndex != swIndex { + return allACLs, fmt.Errorf("returned interface index %d does not match request", resMacIp.SwIfIndex) + } + + for aidx := range resMacIp.Acls { + macipACL, err := handler.getMACIPACLDetails(uint32(aidx)) + if err != nil { + return allACLs, err + } else { + allACLs.Acls = append(allACLs.Acls, macipACL) + } + } + return allACLs, nil +} + +func (handler *aclVppHandler) DumpInterfaceIPACLs(swIndex uint32) (*acl_api.ACLInterfaceListDetails, error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + req := &acl_api.ACLInterfaceListDump{ + SwIfIndex: swIndex, + } + + reply := &acl_api.ACLInterfaceListDetails{} + if err := handler.dumpChannel.SendRequest(req).ReceiveReply(reply); err != nil { + return nil, err + } + + return reply, nil +} + +func (handler *aclVppHandler) DumpInterfaceMACIPACLs(swIndex uint32) (*acl_api.MacipACLInterfaceListDetails, error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(acl_api.MacipACLInterfaceListDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + req := &acl_api.MacipACLInterfaceListDump{ + SwIfIndex: swIndex, + } + + reply := &acl_api.MacipACLInterfaceListDetails{} + if err := handler.dumpChannel.SendRequest(req).ReceiveReply(reply); err != nil { + return nil, err + } + + return reply, nil +} + +func (handler *aclVppHandler) DumpInterfaces() ([]*acl_api.ACLInterfaceListDetails, 
[]*acl_api.MacipACLInterfaceListDetails, error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + msgIPACL := &acl_api.ACLInterfaceListDump{ + SwIfIndex: 0xffffffff, // dump all + } + + reqIPACL := handler.dumpChannel.SendMultiRequest(msgIPACL) + + var IPaclInterfaces []*acl_api.ACLInterfaceListDetails + for { + reply := &acl_api.ACLInterfaceListDetails{} + stop, err := reqIPACL.ReceiveReply(reply) + if stop { + break + } + if err != nil { + logrus.DefaultLogger().Error(err) + return nil, nil, err + } + IPaclInterfaces = append(IPaclInterfaces, reply) + } + + msgMACIPACL := &acl_api.ACLInterfaceListDump{ + SwIfIndex: 0xffffffff, // dump all + } + + reqMACIPACL := handler.dumpChannel.SendMultiRequest(msgMACIPACL) + + var MACIPaclInterfaces []*acl_api.MacipACLInterfaceListDetails + for { + reply := &acl_api.MacipACLInterfaceListDetails{} + stop, err := reqMACIPACL.ReceiveReply(reply) + if stop { + break + } + if err != nil { + logrus.DefaultLogger().Error(err) + return nil, nil, err + } + MACIPaclInterfaces = append(MACIPaclInterfaces, reply) + } + + return IPaclInterfaces, MACIPaclInterfaces, nil +} + +func (handler *aclVppHandler) getIPRuleDetails(rule acl_api.ACLRule) (*acl.AccessLists_Acl_Rule, error) { // Resolve rule actions - aclAction, err := resolveRuleAction(rule.IsPermit) + aclAction, err := handler.resolveRuleAction(rule.IsPermit) if err != nil { return nil, err } // Resolve rule matches match := &acl.AccessLists_Acl_Rule_Match{ - IpRule: getIPRuleMatches(rule), + IpRule: handler.getIPRuleMatches(rule), } return &acl.AccessLists_Acl_Rule{ @@ -423,13 +526,13 @@ func getIPRuleDetails(rule acl_api.ACLRule) (*acl.AccessLists_Acl_Rule, error) { // getIPACLDetails gets details for a given IP ACL from VPP and translates // them from the binary VPP API format into the ACL Plugin's NB format. 
-func getIPACLDetails(vppChannel VPPChannel, idx uint32) (aclRule *acl.AccessLists_Acl, err error) { +func (handler *aclVppHandler) getIPACLDetails(idx uint32) (aclRule *acl.AccessLists_Acl, err error) { req := &acl_api.ACLDump{ ACLIndex: uint32(idx), } reply := &acl_api.ACLDetails{} - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.dumpChannel.SendRequest(req).ReceiveReply(reply); err != nil { return nil, err } @@ -437,13 +540,13 @@ func getIPACLDetails(vppChannel VPPChannel, idx uint32) (aclRule *acl.AccessList for _, r := range reply.R { rule := &acl.AccessLists_Acl_Rule{} - ipRule, _ := getIPRuleDetails(r) + ipRule, _ := handler.getIPRuleDetails(r) match := &acl.AccessLists_Acl_Rule_Match{ IpRule: ipRule.GetMatch().GetIpRule(), } - aclAction, err := resolveRuleAction(r.IsPermit) + aclAction, err := handler.resolveRuleAction(r.IsPermit) if err != nil { return nil, err } @@ -456,16 +559,16 @@ func getIPACLDetails(vppChannel VPPChannel, idx uint32) (aclRule *acl.AccessList return &acl.AccessLists_Acl{Rules: ruleData, AclName: string(bytes.SplitN(reply.Tag, []byte{0x00}, 2)[0])}, nil } -func getMACIPRuleDetails(rule acl_api.MacipACLRule) (*acl.AccessLists_Acl_Rule, error) { +func (handler *aclVppHandler) getMACIPRuleDetails(rule acl_api.MacipACLRule) (*acl.AccessLists_Acl_Rule, error) { // Resolve rule actions - aclAction, err := resolveRuleAction(rule.IsPermit) + aclAction, err := handler.resolveRuleAction(rule.IsPermit) if err != nil { return nil, err } // Resolve rule matches match := &acl.AccessLists_Acl_Rule_Match{ - MacipRule: getMACIPRuleMatches(rule), + MacipRule: handler.getMACIPRuleMatches(rule), } return &acl.AccessLists_Acl_Rule{ @@ -476,13 +579,13 @@ func getMACIPRuleDetails(rule acl_api.MacipACLRule) (*acl.AccessLists_Acl_Rule, // getMACIPACLDetails gets details for a given MACIP ACL from VPP and translates // them from the binary VPP API format into the ACL Plugin's NB format. 
-func getMACIPACLDetails(vppChannel VPPChannel, idx uint32) (aclRule *acl.AccessLists_Acl, err error) { +func (handler *aclVppHandler) getMACIPACLDetails(idx uint32) (aclRule *acl.AccessLists_Acl, err error) { req := &acl_api.MacipACLDump{ ACLIndex: uint32(idx), } reply := &acl_api.MacipACLDetails{} - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.dumpChannel.SendRequest(req).ReceiveReply(reply); err != nil { return nil, err } @@ -490,13 +593,13 @@ func getMACIPACLDetails(vppChannel VPPChannel, idx uint32) (aclRule *acl.AccessL for _, r := range reply.R { rule := &acl.AccessLists_Acl_Rule{} - ipRule, _ := getMACIPRuleDetails(r) + ipRule, _ := handler.getMACIPRuleDetails(r) match := &acl.AccessLists_Acl_Rule_Match{ IpRule: ipRule.GetMatch().GetIpRule(), } - aclAction, err := resolveRuleAction(r.IsPermit) + aclAction, err := handler.resolveRuleAction(r.IsPermit) if err != nil { return nil, err } @@ -511,7 +614,7 @@ func getMACIPACLDetails(vppChannel VPPChannel, idx uint32) (aclRule *acl.AccessL // getIPRuleMatches translates an IP rule from the binary VPP API format into the // ACL Plugin's NB format -func getIPRuleMatches(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule { +func (handler *aclVppHandler) getIPRuleMatches(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule { var srcIP, dstIP string if r.IsIpv6 == 1 { srcIP = net.IP(r.SrcIPAddr).To16().String() @@ -530,18 +633,18 @@ func getIPRuleMatches(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule switch r.Proto { case TCPProto: - ipRule.Tcp = getTCPMatchRule(r) + ipRule.Tcp = handler.getTCPMatchRule(r) case UDPProto: - ipRule.Udp = getUDPMatchRule(r) + ipRule.Udp = handler.getUDPMatchRule(r) case ICMPv4Proto, ICMPv6Proto: - ipRule.Icmp = getIcmpMatchRule(r) + ipRule.Icmp = handler.getIcmpMatchRule(r) } return ipRule } // getMACIPRuleMatches translates an MACIP rule from the binary VPP API format into the // ACL Plugin's NB format -func getMACIPRuleMatches(rule acl_api.MacipACLRule) *acl.AccessLists_Acl_Rule_Match_MacIpRule { +func (handler *aclVppHandler) getMACIPRuleMatches(rule acl_api.MacipACLRule) *acl.AccessLists_Acl_Rule_Match_MacIpRule { var srcAddr string if rule.IsIpv6 == 1 { srcAddr = net.IP(rule.SrcIPAddr).To16().String() @@ -558,7 +661,7 @@ func getMACIPRuleMatches(rule acl_api.MacipACLRule) *acl.AccessLists_Acl_Rule_Ma // getTCPMatchRule translates a TCP match rule from the binary VPP API format // into the ACL Plugin's NB format -func getTCPMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_Tcp { +func (handler *aclVppHandler) getTCPMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_Tcp { dstPortRange := &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ LowerPort: uint32(r.DstportOrIcmpcodeFirst), UpperPort: uint32(r.DstportOrIcmpcodeLast), @@ -578,7 +681,7 @@ func getTCPMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_T // getUDPMatchRule translates a UDP match rule from the binary VPP API format // into the ACL Plugin's NB format -func getUDPMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_Udp { +func (handler *aclVppHandler) getUDPMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_Udp { dstPortRange := &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ LowerPort: uint32(r.DstportOrIcmpcodeFirst), UpperPort: uint32(r.DstportOrIcmpcodeLast), @@ -596,7 +699,7 @@ func getUDPMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_U // getIcmpMatchRule 
translates an ICMP match rule from the binary VPP API // format into the ACL Plugin's NB format -func getIcmpMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp { +func (handler *aclVppHandler) getIcmpMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp { icmp := &acl.AccessLists_Acl_Rule_Match_IpRule_Icmp{ Icmpv6: r.IsIpv6 > 0, IcmpCodeRange: &acl.AccessLists_Acl_Rule_Match_IpRule_Icmp_Range{}, @@ -606,7 +709,7 @@ func getIcmpMatchRule(r acl_api.ACLRule) *acl.AccessLists_Acl_Rule_Match_IpRule_ } // Returns rule action representation in model according to the vpp input -func resolveRuleAction(isPermit uint8) (acl.AclAction, error) { +func (handler *aclVppHandler) resolveRuleAction(isPermit uint8) (acl.AclAction, error) { switch isPermit { case 0: return acl.AclAction_DENY, nil @@ -618,143 +721,3 @@ func resolveRuleAction(isPermit uint8) (acl.AclAction, error) { return acl.AclAction_DENY, fmt.Errorf("invalid match rule %d", isPermit) } } - -// DumpInterfaceAcls finds interface in VPP and returns its ACL configuration -//func DumpInterfaceIPAclsDetails(log logging.Logger, swIndex uint32, vppChannel VPPChannel, stopwatch *measure.Stopwatch) (acl.AccessLists, error) { -func DumpInterfaceIPAcls(log logging.Logger, swIndex uint32, vppChannel VPPChannel, stopwatch *measure.Stopwatch) (acl.AccessLists, error) { - alAcls := acl.AccessLists{ - Acls: []*acl.AccessLists_Acl{}, - } - - res, err := DumpInterfaceIPACLs(swIndex, vppChannel, stopwatch) - log.Infof("Res: %+v\n", res) - if err != nil { - return alAcls, err - } - - if res.SwIfIndex != swIndex { - return alAcls, fmt.Errorf("returned interface index %d does not match request", res.SwIfIndex) - } - - for aidx := range res.Acls { - ipACL, err := getIPACLDetails(vppChannel, uint32(aidx)) - if err != nil { - log.Error(err) - } else { - alAcls.Acls = append(alAcls.Acls, ipACL) - } - } - return alAcls, nil -} - -// DumpInterfaceMACIPAcls finds interface in VPP and returns its MACIP ACL configuration -func DumpInterfaceMACIPAcls(log logging.Logger, swIndex uint32, vppChannel VPPChannel, stopwatch *measure.Stopwatch) (acl.AccessLists, error) { - alAcls := acl.AccessLists{ - Acls: []*acl.AccessLists_Acl{}, - } - - resMacIp, err := DumpInterfaceMACIPACLs(swIndex, vppChannel, stopwatch) - log.Infof("Res: %+v\n", resMacIp) - if err != nil { - return alAcls, err - } - - if resMacIp.SwIfIndex != swIndex { - return alAcls, fmt.Errorf("returned interface index %d does not match request", resMacIp.SwIfIndex) - } - - for aidx := range resMacIp.Acls { - macipACL, err := getMACIPACLDetails(vppChannel, uint32(aidx)) - if err != nil { - log.Error(err) - } else { - alAcls.Acls = append(alAcls.Acls, macipACL) - } - } - return alAcls, nil -} - -// DumpInterface finds interface in VPP and returns its IP ACL configuration. -func DumpInterfaceIPACLs(swIndex uint32, vppChannel VPPChannel, stopwatch *measure.Stopwatch) (*acl_api.ACLInterfaceListDetails, error) { - defer func(t time.Time) { - stopwatch.TimeLog(acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(t)) - }(time.Now()) - - req := &acl_api.ACLInterfaceListDump{ - SwIfIndex: swIndex, - } - - reply := &acl_api.ACLInterfaceListDetails{} - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { - return nil, err - } - - return reply, nil -} - -// DumpInterface finds interface in VPP and returns its MACIP ACL configuration. 
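For orientation: resolveRuleAction switches over VPP's is_permit encoding, but the hunk above shows only the deny case and the error fallback. The full mapping sketched below is therefore an assumption based on that convention, supported by the dump tests in this series, which pass IsPermit: 2 and expect success; the helper name is illustrative:

    // Assumed complete is_permit mapping; only case 0 and the default
    // are visible in the hunk above.
    func ruleAction(isPermit uint8) (acl.AclAction, error) {
        switch isPermit {
        case 0:
            return acl.AclAction_DENY, nil
        case 1:
            return acl.AclAction_PERMIT, nil
        case 2:
            return acl.AclAction_REFLECT, nil // VPP's permit+reflect
        default:
            return acl.AclAction_DENY, fmt.Errorf("invalid match rule %d", isPermit)
        }
    }
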
-func DumpInterfaceMACIPACLs(swIndex uint32, vppChannel VPPChannel, stopwatch *measure.Stopwatch) (*acl_api.MacipACLInterfaceListDetails, error) { - defer func(t time.Time) { - stopwatch.TimeLog(acl_api.MacipACLInterfaceListDump{}).LogTimeEntry(time.Since(t)) - }(time.Now()) - - req := &acl_api.MacipACLInterfaceListDump{ - SwIfIndex: swIndex, - } - - reply := &acl_api.MacipACLInterfaceListDetails{} - if err := vppChannel.SendRequest(req).ReceiveReply(reply); err != nil { - return nil, err - } - - return reply, nil -} - -// DumpInterfaces finds all interfaces in VPP and returns their ACL configurations -func DumpInterfaces(vppChannel VPPChannel, stopwatch *measure.Stopwatch) ([]*acl_api.ACLInterfaceListDetails, []*acl_api.MacipACLInterfaceListDetails, error) { - defer func(t time.Time) { - stopwatch.TimeLog(acl_api.ACLInterfaceListDump{}).LogTimeEntry(time.Since(t)) - }(time.Now()) - - msgIPACL := &acl_api.ACLInterfaceListDump{ - SwIfIndex: 0xffffffff, // dump all - } - - reqIPACL := vppChannel.SendMultiRequest(msgIPACL) - - var IPaclInterfaces []*acl_api.ACLInterfaceListDetails - for { - reply := &acl_api.ACLInterfaceListDetails{} - stop, err := reqIPACL.ReceiveReply(reply) - if stop { - break - } - if err != nil { - logrus.DefaultLogger().Error(err) - return nil, nil, err - } - IPaclInterfaces = append(IPaclInterfaces, reply) - } - - msgMACIPACL := &acl_api.ACLInterfaceListDump{ - SwIfIndex: 0xffffffff, // dump all - } - - reqMACIPACL := vppChannel.SendMultiRequest(msgMACIPACL) - - var MACIPaclInterfaces []*acl_api.MacipACLInterfaceListDetails - for { - reply := &acl_api.MacipACLInterfaceListDetails{} - stop, err := reqMACIPACL.ReceiveReply(reply) - if stop { - break - } - if err != nil { - logrus.DefaultLogger().Error(err) - return nil, nil, err - } - MACIPaclInterfaces = append(MACIPaclInterfaces, reply) - } - - return IPaclInterfaces, MACIPaclInterfaces, nil -} diff --git a/plugins/vpp/aclplugin/vppdump/dump_vppcalls_test.go b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls_test.go similarity index 84% rename from plugins/vpp/aclplugin/vppdump/dump_vppcalls_test.go rename to plugins/vpp/aclplugin/vppcalls/dump_vppcalls_test.go index 9f8f94fb33..a14ea3b1e4 100644 --- a/plugins/vpp/aclplugin/vppdump/dump_vppcalls_test.go +++ b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package vppdump +package vppcalls import ( "testing" @@ -28,7 +28,9 @@ import ( // Test translation of IP rule into ACL Plugin's format func TestGetIPRuleMatch(t *testing.T) { - icmpV4Rule := getIPRuleMatches(acl_api.ACLRule{ + aclHandler := NewAclVppHandler(nil, nil, nil) + + icmpV4Rule := aclHandler.getIPRuleMatches(acl_api.ACLRule{ SrcIPAddr: []byte{10, 0, 0, 1}, SrcIPPrefixLen: 24, DstIPAddr: []byte{20, 0, 0, 1}, @@ -39,7 +41,7 @@ func TestGetIPRuleMatch(t *testing.T) { t.Fatal("should have icmp match") } - icmpV6Rule := getIPRuleMatches(acl_api.ACLRule{ + icmpV6Rule := aclHandler.getIPRuleMatches(acl_api.ACLRule{ IsIpv6: 1, SrcIPAddr: []byte{'d', 'e', 'd', 'd', 1}, SrcIPPrefixLen: 64, @@ -51,7 +53,7 @@ func TestGetIPRuleMatch(t *testing.T) { t.Fatal("should have icmpv6 match") } - tcpRule := getIPRuleMatches(acl_api.ACLRule{ + tcpRule := aclHandler.getIPRuleMatches(acl_api.ACLRule{ SrcIPAddr: []byte{10, 0, 0, 1}, SrcIPPrefixLen: 24, DstIPAddr: []byte{20, 0, 0, 1}, @@ -62,7 +64,7 @@ func TestGetIPRuleMatch(t *testing.T) { t.Fatal("should have tcp match") } - udpRule := getIPRuleMatches(acl_api.ACLRule{ + udpRule := aclHandler.getIPRuleMatches(acl_api.ACLRule{ SrcIPAddr: []byte{10, 0, 0, 1}, SrcIPPrefixLen: 24, DstIPAddr: []byte{20, 0, 0, 1}, @@ -76,7 +78,9 @@ func TestGetIPRuleMatch(t *testing.T) { // Test translation of MACIP rule into ACL Plugin's format func TestGetMACIPRuleMatches(t *testing.T) { - macipV4Rule := getMACIPRuleMatches(acl_api.MacipACLRule{ + aclHandler := NewAclVppHandler(nil, nil, nil) + + macipV4Rule := aclHandler.getMACIPRuleMatches(acl_api.MacipACLRule{ IsPermit: 1, SrcMac: []byte{2, 'd', 'e', 'a', 'd', 2}, SrcMacMask: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, @@ -86,7 +90,7 @@ func TestGetMACIPRuleMatches(t *testing.T) { if macipV4Rule.GetSourceMacAddress() == "" { t.Fatal("should have mac match") } - macipV6Rule := getMACIPRuleMatches(acl_api.MacipACLRule{ + macipV6Rule := aclHandler.getMACIPRuleMatches(acl_api.MacipACLRule{ IsPermit: 0, IsIpv6: 1, SrcMac: []byte{2, 'd', 'e', 'a', 'd', 2}, @@ -132,10 +136,12 @@ func TestDumpIPACL(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test", nil)) swIfIndexes.RegisterName("if0", 1, nil) - ifaces, err := DumpIPACL(swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + ifaces, err := aclHandler.DumpIPACL(swIfIndexes) Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(3)) //Expect(ifaces[0].Identifier.ACLIndex).To(Equal(uint32(0))) @@ -176,10 +182,12 @@ func TestDumpMACIPACL(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test", nil)) swIfIndexes.RegisterName("if0", 1, nil) - ifaces, err := DumpMACIPACL(swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + ifaces, err := aclHandler.DumpMACIPACL(swIfIndexes) Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(3)) //Expect(ifaces[0].Identifier.ACLIndex).To(Equal(uint32(0))) @@ -201,11 +209,13 @@ func TestDumpACLInterfaces(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test", nil)) swIfIndexes.RegisterName("if0", 1, nil) indexes := 
[]uint32{0, 2} - ifaces, err := DumpIPACLInterfaces(indexes, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + ifaces, err := aclHandler.DumpIPACLInterfaces(indexes, swIfIndexes) Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(2)) Expect(ifaces[0].Ingress).To(Equal([]string{"if0"})) @@ -224,11 +234,13 @@ func TestDumpMACIPACLInterfaces(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, nil) indexes := []uint32{0, 1} - ifaces, err := DumpMACIPACLInterfaces(indexes, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + ifaces, err := aclHandler.DumpMACIPACLInterfaces(indexes, swIfIndexes) Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(2)) Expect(ifaces[0].Ingress).To(Equal([]string{"if0"})) @@ -249,7 +261,9 @@ func TestDumpIPAcls(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - IPRuleACLs, err := DumpIPAcls(logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + IPRuleACLs, err := aclHandler.DumpIPAcls() Expect(err).To(Succeed()) Expect(IPRuleACLs).To(HaveLen(1)) } @@ -266,7 +280,9 @@ func TestDumpMacIPAcls(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - MacIPRuleACLs, err := DumpMacIPAcls(logrus.DefaultLogger(), ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + MacIPRuleACLs, err := aclHandler.DumpMacIPAcls() Expect(err).To(Succeed()) Expect(MacIPRuleACLs).To(HaveLen(1)) } @@ -292,7 +308,9 @@ func TestDumpInterfaceIPAcls(t *testing.T) { R: []acl_api.ACLRule{{IsPermit: 2}, {IsPermit: 0}}, }) - ACLs, err := DumpInterfaceIPAcls(logrus.DefaultLogger(), 0, ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + ACLs, err := aclHandler.DumpInterfaceIPAcls(0) Expect(err).To(Succeed()) Expect(ACLs.Acls).To(HaveLen(2)) } @@ -317,7 +335,9 @@ func TestDumpInterfaceMACIPAcls(t *testing.T) { R: []acl_api.MacipACLRule{{IsPermit: 2}, {IsPermit: 1}}, }) - ACLs, err := DumpInterfaceMACIPAcls(logrus.DefaultLogger(), 0, ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + ACLs, err := aclHandler.DumpInterfaceMACIPAcls(0) Expect(err).To(Succeed()) Expect(ACLs.Acls).To(HaveLen(2)) } @@ -326,18 +346,20 @@ func TestDumpInterface(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ SwIfIndex: 0, Count: 2, NInput: 1, Acls: []uint32{0, 1}, }) - IPacls, err := DumpInterfaceIPACLs(0, ctx.MockChannel, nil) + IPacls, err := aclHandler.DumpInterfaceIPACLs(0) Expect(err).To(BeNil()) Expect(IPacls.Acls).To(HaveLen(2)) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{}) - IPacls, err = DumpInterfaceIPACLs(0, ctx.MockChannel, nil) + IPacls, err = aclHandler.DumpInterfaceIPACLs(0) Expect(err).To(BeNil()) Expect(IPacls.Acls).To(HaveLen(0)) @@ -346,12 +368,12 @@ func TestDumpInterface(t *testing.T) { Count: 2, Acls: []uint32{0, 1}, }) - MACIPacls, err := DumpInterfaceMACIPACLs(0, ctx.MockChannel, nil) + MACIPacls, err := aclHandler.DumpInterfaceMACIPACLs(0) Expect(err).To(BeNil()) Expect(MACIPacls.Acls).To(HaveLen(2)) 
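All of these dump tests use the same mock recipe: detail replies are queued in order, and a trailing vpe.ControlPingReply tells the mocked multi-request where to stop. A hedged, condensed sketch of that recipe (the test name and Tag value are illustrative, not part of the patch):

    func TestDumpIPAclsSketch(t *testing.T) {
        ctx := vppcallmock.SetupTestCtx(t)
        defer ctx.TeardownTestCtx()

        aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil)

        // Queue one detail reply, then the control-ping that terminates the dump.
        ctx.MockVpp.MockReply(&acl_api.ACLDetails{Tag: []byte("acl1")})
        ctx.MockVpp.MockReply(&vpe.ControlPingReply{})

        aclRules, err := aclHandler.DumpIPAcls()
        Expect(err).To(Succeed())
        Expect(aclRules).To(HaveLen(1))
    }
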
ctx.MockVpp.MockReply(&acl_api.MacipACLInterfaceListDetails{}) - MACIPacls, err = DumpInterfaceMACIPACLs(0, ctx.MockChannel, nil) + MACIPacls, err = aclHandler.DumpInterfaceMACIPACLs(0) Expect(err).To(BeNil()) Expect(MACIPacls.Acls).To(HaveLen(0)) } @@ -392,7 +414,9 @@ func TestDumpInterfaces(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - IPacls, MACIPacls, err := DumpInterfaces(ctx.MockChannel, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + + IPacls, MACIPacls, err := aclHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(IPacls).To(HaveLen(3)) Expect(MACIPacls).To(HaveLen(2)) diff --git a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go index cf306f5e59..5a95cb7f2c 100644 --- a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go @@ -18,11 +18,8 @@ import ( "fmt" "time" - "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/measure" - "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppdump" acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" ) // ACLInterfaceLogicalReq groups multiple fields to not enumerate all of them in one function call @@ -32,76 +29,106 @@ type ACLInterfaceLogicalReq struct { ingress bool } -// ACLInterfacesVppCalls aggregates vpp calls related to the IP ACL interfaces -type ACLInterfacesVppCalls struct { - log logging.Logger - vppChan vppdump.VPPChannel - swIfIndexes ifaceidx.SwIfIndex - stopwatch *measure.Stopwatch - setACLStopwatch measure.StopWatchEntry -} - -// NewACLInterfacesVppCalls constructs IP ACL interfaces vpp calls object -func NewACLInterfacesVppCalls(log logging.Logger, vppChan vppdump.VPPChannel, swIfIndexes ifaceidx.SwIfIndex, stopwatch *measure.Stopwatch) *ACLInterfacesVppCalls { - return &ACLInterfacesVppCalls{ - log: log, - vppChan: vppChan, - swIfIndexes: swIfIndexes, - setACLStopwatch: measure.GetTimeLog(acl_api.ACLInterfaceSetACLList{}, stopwatch), - } -} - -// SetACLToInterfacesAsIngress sets ACL to all provided interfaces as ingress -func (acl *ACLInterfacesVppCalls) SetACLToInterfacesAsIngress(ACLIndex uint32, ifIndices []uint32) error { - acl.log.Debugf("Setting up IP ingress ACL from interfaces: %v ", ifIndices) - - return acl.requestSetACLToInterfaces(&ACLInterfaceLogicalReq{ +func (handler *aclVppHandler) SetACLToInterfacesAsIngress(ACLIndex uint32, ifIndices []uint32) error { + return handler.requestSetACLToInterfaces(&ACLInterfaceLogicalReq{ aclIndex: ACLIndex, ifIndices: ifIndices, ingress: true, }) } -// RemoveIPIngressACLFromInterfaces removes ACL from interfaces -func (acl *ACLInterfacesVppCalls) RemoveIPIngressACLFromInterfaces(ACLIndex uint32, ifIndices []uint32) error { - acl.log.Debugf("Removing IP ingress ACL from interfaces: %v ", ifIndices) - - return acl.requestRemoveInterfacesFromACL(&ACLInterfaceLogicalReq{ +func (handler *aclVppHandler) RemoveIPIngressACLFromInterfaces(ACLIndex uint32, ifIndices []uint32) error { + return handler.requestRemoveInterfacesFromACL(&ACLInterfaceLogicalReq{ aclIndex: ACLIndex, ifIndices: ifIndices, ingress: true, }) } -// SetACLToInterfacesAsEgress sets ACL to all provided interfaces as egress -func (acl *ACLInterfacesVppCalls) SetACLToInterfacesAsEgress(ACLIndex uint32, ifIndices []uint32) error { - acl.log.Debugf("Setting up IP egress ACL from interfaces: %v ", ifIndices) - - return 
acl.requestSetACLToInterfaces(&ACLInterfaceLogicalReq{
+func (handler *aclVppHandler) SetACLToInterfacesAsEgress(ACLIndex uint32, ifIndices []uint32) error {
+	return handler.requestSetACLToInterfaces(&ACLInterfaceLogicalReq{
 		aclIndex:  ACLIndex,
 		ifIndices: ifIndices,
 		ingress:   false,
 	})
 }
 
-// RemoveIPEgressACLFromInterfaces removes ACL from interfaces
-func (acl *ACLInterfacesVppCalls) RemoveIPEgressACLFromInterfaces(ACLIndex uint32, ifIndices []uint32) error {
-	acl.log.Debugf("Removing IP egress ACL from interfaces: %v ", ifIndices)
-
-	return acl.requestRemoveInterfacesFromACL(&ACLInterfaceLogicalReq{
+func (handler *aclVppHandler) RemoveIPEgressACLFromInterfaces(ACLIndex uint32, ifIndices []uint32) error {
+	return handler.requestRemoveInterfacesFromACL(&ACLInterfaceLogicalReq{
 		aclIndex:  ACLIndex,
 		ifIndices: ifIndices,
 		ingress:   false,
 	})
 }
 
-func (acl *ACLInterfacesVppCalls) requestSetACLToInterfaces(logicalReq *ACLInterfaceLogicalReq) error {
+func (handler *aclVppHandler) SetMacIPAclToInterface(aclIndex uint32, ifIndices []uint32) error {
+	setACLStopwatch := measure.GetTimeLog(acl_api.MacipACLInterfaceAddDel{}, handler.stopwatch)
+	for _, ingressIfIdx := range ifIndices {
+		// Measure MacipACLInterfaceAddDel time
+		start := time.Now()
+
+		req := &acl_api.MacipACLInterfaceAddDel{}
+		req.ACLIndex = aclIndex
+		req.IsAdd = 1
+		req.SwIfIndex = ingressIfIdx
+
+		reply := &acl_api.MacipACLInterfaceAddDelReply{}
+
+		err := handler.callsChannel.SendRequest(req).ReceiveReply(reply)
+		if err != nil {
+			return fmt.Errorf("failed to set interface %v to L2 ACL %v", ingressIfIdx, aclIndex)
+		}
+		if reply.Retval != 0 {
+			return fmt.Errorf("set interface %v to L2 ACL %v returned %v", ingressIfIdx, aclIndex, reply.Retval)
+		}
+
+		// Log MacipACLInterfaceAddDel time measurement results.
+		if setACLStopwatch != nil {
+			setACLStopwatch.LogTimeEntry(time.Since(start))
+		}
+	}
+
+	return nil
+}
+
+func (handler *aclVppHandler) RemoveMacIPIngressACLFromInterfaces(removedACLIndex uint32, ifIndices []uint32) error {
+	setACLStopwatch := measure.GetTimeLog(acl_api.MacipACLInterfaceAddDel{}, handler.stopwatch)
+	for _, ifIdx := range ifIndices {
+		// Measure MacipACLInterfaceAddDel time.
+		start := time.Now()
+
+		req := &acl_api.MacipACLInterfaceAddDel{}
+		req.ACLIndex = removedACLIndex
+		req.SwIfIndex = ifIdx
+		req.IsAdd = 0
+
+		reply := &acl_api.MacipACLInterfaceAddDelReply{}
+
+		err := handler.callsChannel.SendRequest(req).ReceiveReply(reply)
+		if err != nil {
+			return fmt.Errorf("failed to remove L2 ACL %v from interface %v", removedACLIndex, ifIdx)
+		}
+		if reply.Retval != 0 {
+			return fmt.Errorf("remove L2 ACL %v from interface %v returned error %v", removedACLIndex,
+				ifIdx, reply.Retval)
+		}
+
+		// Log MacipACLInterfaceAddDel time measurement results.
+ if setACLStopwatch != nil { + setACLStopwatch.LogTimeEntry(time.Since(start)) + } + } + return nil +} + +func (handler *aclVppHandler) requestSetACLToInterfaces(logicalReq *ACLInterfaceLogicalReq) error { + setACLStopwatch := measure.GetTimeLog(acl_api.ACLInterfaceSetACLList{}, handler.stopwatch) for _, aclIfIdx := range logicalReq.ifIndices { // Create acl list with new entry var ACLs []uint32 // All previously assigned ACLs have to be dumped and added to acl list - aclInterfaceDetails, err := vppdump.DumpInterfaceIPACLs(aclIfIdx, acl.vppChan, acl.stopwatch) + aclInterfaceDetails, err := handler.DumpInterfaceIPACLs(aclIfIdx) if err != nil { return err } @@ -139,7 +166,7 @@ func (acl *ACLInterfacesVppCalls) requestSetACLToInterfaces(logicalReq *ACLInter msg.NInput = nInput reply := &acl_api.ACLInterfaceSetACLListReply{} - err = acl.vppChan.SendRequest(msg).ReceiveReply(reply) + err = handler.callsChannel.SendRequest(msg).ReceiveReply(reply) if err != nil { return err } @@ -147,25 +174,24 @@ func (acl *ACLInterfacesVppCalls) requestSetACLToInterfaces(logicalReq *ACLInter return fmt.Errorf("setting up interface ACL list returned %v", reply.Retval) } - acl.log.WithFields(logging.Fields{"SwIdx index": msg.SwIfIndex, "AclIdx": logicalReq.aclIndex}).Debug("Interface set to ACL") - // Log ACLInterfaceSetACLList time measurement results - if acl.setACLStopwatch != nil { - acl.setACLStopwatch.LogTimeEntry(time.Since(start)) + if setACLStopwatch != nil { + setACLStopwatch.LogTimeEntry(time.Since(start)) } } return nil } -func (acl *ACLInterfacesVppCalls) requestRemoveInterfacesFromACL(logicalReq *ACLInterfaceLogicalReq) error { +func (handler *aclVppHandler) requestRemoveInterfacesFromACL(logicalReq *ACLInterfaceLogicalReq) error { + setACLStopwatch := measure.GetTimeLog(acl_api.ACLInterfaceSetACLList{}, handler.stopwatch) var wasErr error for _, aclIfIdx := range logicalReq.ifIndices { // Create empty ACL list var ACLs []uint32 // All assigned ACLs have to be dumped - aclInterfaceDetails, err := vppdump.DumpInterfaceIPACLs(aclIfIdx, acl.vppChan, acl.stopwatch) + aclInterfaceDetails, err := handler.DumpInterfaceIPACLs(aclIfIdx) if err != nil { return err } @@ -198,7 +224,7 @@ func (acl *ACLInterfacesVppCalls) requestRemoveInterfacesFromACL(logicalReq *ACL msg.NInput = nInput reply := &acl_api.ACLInterfaceSetACLListReply{} - err = acl.vppChan.SendRequest(msg).ReceiveReply(reply) + err = handler.callsChannel.SendRequest(msg).ReceiveReply(reply) if err != nil { wasErr = err } @@ -206,75 +232,11 @@ func (acl *ACLInterfacesVppCalls) requestRemoveInterfacesFromACL(logicalReq *ACL wasErr = fmt.Errorf("setting up interface ACL list returned %v", reply.Retval) } - acl.log.WithFields(logging.Fields{"SwIdx index": msg.SwIfIndex, "AclIdx": logicalReq.aclIndex}).Debug("Interface removed from ACL") - // Log ACLInterfaceSetACLList time measurement results - if acl.setACLStopwatch != nil { - acl.setACLStopwatch.LogTimeEntry(time.Since(start)) + if setACLStopwatch != nil { + setACLStopwatch.LogTimeEntry(time.Since(start)) } } return wasErr } - -// SetMacIPAclToInterface adds L2 ACL to interface. 
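With the logger dependency dropped, every call site in this file now times requests the same way: resolve a stopwatch entry up front via measure.GetTimeLog, which returns nil when measuring is disabled, and guard the log step accordingly. The shape, condensed from the hunks above:

    setACLStopwatch := measure.GetTimeLog(acl_api.ACLInterfaceSetACLList{}, handler.stopwatch)
    start := time.Now()
    // ... build msg, SendRequest(msg).ReceiveReply(reply), check Retval ...
    if setACLStopwatch != nil {
        setACLStopwatch.LogTimeEntry(time.Since(start))
    }
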
-func (acl *ACLInterfacesVppCalls) SetMacIPAclToInterface(aclIndex uint32, ifIndices []uint32) error { - for _, ingressIfIdx := range ifIndices { - // Measure MacipACLInterfaceAddDel time - start := time.Now() - - req := &acl_api.MacipACLInterfaceAddDel{} - req.ACLIndex = aclIndex - req.IsAdd = 1 - req.SwIfIndex = ingressIfIdx - - reply := &acl_api.MacipACLInterfaceAddDelReply{} - - err := acl.vppChan.SendRequest(req).ReceiveReply(reply) - if err != nil { - return fmt.Errorf("failed to set interface %v to L2 ACL %v", ingressIfIdx, aclIndex) - } - if reply.Retval != 0 { - return fmt.Errorf("set interface %v to L2 ACL %v returned %v", ingressIfIdx, aclIndex, reply.Retval) - } - acl.log.Debugf("Interface %v set to L2 ACL %v as ingress", ingressIfIdx, aclIndex) - - // Log MacipACLInterfaceAddDel time measurement results. - if acl.setACLStopwatch != nil { - acl.setACLStopwatch.LogTimeEntry(time.Since(start)) - } - } - - return nil -} - -// RemoveMacIPIngressACLFromInterfaces removes L2 ACL from interfaces. -func (acl *ACLInterfacesVppCalls) RemoveMacIPIngressACLFromInterfaces(removedACLIndex uint32, ifIndices []uint32) error { - for _, ifIdx := range ifIndices { - // Measure MacipACLInterfaceAddDel time. - start := time.Now() - - req := &acl_api.MacipACLInterfaceAddDel{} - req.ACLIndex = removedACLIndex - req.SwIfIndex = ifIdx - req.IsAdd = 0 - - reply := &acl_api.MacipACLInterfaceAddDelReply{} - - err := acl.vppChan.SendRequest(req).ReceiveReply(reply) - if err != nil { - return fmt.Errorf("failed to remove L2 ACL %v from interface %v", removedACLIndex, ifIdx) - } - if reply.Retval != 0 { - return fmt.Errorf("remove L2 ACL %v from interface %v returned error %v", removedACLIndex, - removedACLIndex, reply.Retval) - } - acl.log.Debugf("L2 ACL %v removed from interface %v (ingress)", removedACLIndex, ifIdx) - - // Log MacipACLInterfaceAddDel time measurement results. - if acl.setACLStopwatch != nil { - acl.setACLStopwatch.LogTimeEntry(time.Since(start)) - } - } - return nil -} diff --git a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go index 6334df7060..1d01c903f3 100644 --- a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go +++ b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go @@ -17,10 +17,7 @@ package vppcalls import ( "testing" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/idxvpp/nametoidx" acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) @@ -30,8 +27,7 @@ func TestRequestSetACLToInterfaces(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-plugin", nil)) - interfaces := NewACLInterfacesVppCalls(logrus.DefaultLogger(), ctx.MockChannel, ifIndexes, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ 0, @@ -40,7 +36,7 @@ func TestRequestSetACLToInterfaces(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err := interfaces.SetACLToInterfacesAsIngress(0, []uint32{0}) + err := aclHandler.SetACLToInterfacesAsIngress(0, []uint32{0}) Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -50,13 +46,13 @@ func TestRequestSetACLToInterfaces(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err = interfaces.SetACLToInterfacesAsEgress(0, []uint32{0}) + err = aclHandler.SetACLToInterfacesAsEgress(0, []uint32{0}) Expect(err).To(BeNil()) // error cases ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err = interfaces.SetACLToInterfacesAsIngress(0, []uint32{0}) + err = aclHandler.SetACLToInterfacesAsIngress(0, []uint32{0}) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -66,7 +62,7 @@ func TestRequestSetACLToInterfaces(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{}) - err = interfaces.SetACLToInterfacesAsIngress(0, []uint32{0}) + err = aclHandler.SetACLToInterfacesAsIngress(0, []uint32{0}) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -76,7 +72,7 @@ func TestRequestSetACLToInterfaces(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{Retval: -1}) - err = interfaces.SetACLToInterfacesAsIngress(0, []uint32{0}) + err = aclHandler.SetACLToInterfacesAsIngress(0, []uint32{0}) Expect(err).To(Not(BeNil())) } @@ -85,8 +81,7 @@ func TestRequestRemoveInterfacesFromACL(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-plugin", nil)) - interfaces := NewACLInterfacesVppCalls(logrus.DefaultLogger(), ctx.MockChannel, ifIndexes, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ 0, @@ -95,7 +90,7 @@ func TestRequestRemoveInterfacesFromACL(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err := interfaces.RemoveIPIngressACLFromInterfaces(0, []uint32{0}) + err := aclHandler.RemoveIPIngressACLFromInterfaces(0, []uint32{0}) Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -105,13 +100,13 @@ func TestRequestRemoveInterfacesFromACL(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err = interfaces.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) + err = aclHandler.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) Expect(err).To(BeNil()) // error cases ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err = interfaces.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) + err = aclHandler.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) Expect(err).To(Not(BeNil())) 
ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -121,7 +116,7 @@ func TestRequestRemoveInterfacesFromACL(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{}) - err = interfaces.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) + err = aclHandler.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -131,7 +126,7 @@ func TestRequestRemoveInterfacesFromACL(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{Retval: -1}) - err = interfaces.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) + err = aclHandler.RemoveIPEgressACLFromInterfaces(0, []uint32{0}) Expect(err).To(Not(BeNil())) } @@ -140,21 +135,20 @@ func TestSetMacIPAclToInterface(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-plugin", nil)) - interfaces := NewACLInterfacesVppCalls(logrus.DefaultLogger(), ctx.MockChannel, ifIndexes, nil) + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) ctx.MockVpp.MockReply(&acl_api.MacipACLInterfaceAddDelReply{}) - err := interfaces.SetMacIPAclToInterface(0, []uint32{0}) + err := aclHandler.SetMacIPAclToInterface(0, []uint32{0}) Expect(err).To(BeNil()) // error cases ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{}) - err = interfaces.SetMacIPAclToInterface(0, []uint32{0}) + err = aclHandler.SetMacIPAclToInterface(0, []uint32{0}) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.MacipACLInterfaceAddDelReply{Retval: -1}) - err = interfaces.SetMacIPAclToInterface(0, []uint32{0}) + err = aclHandler.SetMacIPAclToInterface(0, []uint32{0}) Expect(err).To(Not(BeNil())) } @@ -163,26 +157,19 @@ func TestRemoveMacIPIngressACLFromInterfaces(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-plugin", nil)) - interfaces := ACLInterfacesVppCalls{ - logrus.DefaultLogger(), - ctx.MockChannel, - ifIndexes, - nil, - nil, - } + aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) ctx.MockVpp.MockReply(&acl_api.MacipACLInterfaceAddDelReply{}) - err := interfaces.RemoveMacIPIngressACLFromInterfaces(1, []uint32{0}) + err := aclHandler.RemoveMacIPIngressACLFromInterfaces(1, []uint32{0}) Expect(err).To(BeNil()) // error cases ctx.MockVpp.MockReply(&acl_api.MacipACLAddReplaceReply{}) - err = interfaces.RemoveMacIPIngressACLFromInterfaces(0, []uint32{0}) + err = aclHandler.RemoveMacIPIngressACLFromInterfaces(0, []uint32{0}) Expect(err).To(Not(BeNil())) ctx.MockVpp.MockReply(&acl_api.MacipACLInterfaceAddDelReply{Retval: -1}) - err = interfaces.RemoveMacIPIngressACLFromInterfaces(0, []uint32{0}) + err = aclHandler.RemoveMacIPIngressACLFromInterfaces(0, []uint32{0}) Expect(err).To(Not(BeNil())) } diff --git a/plugins/vpp/aclplugin/vppcalls/vpp_channel.go b/plugins/vpp/aclplugin/vppcalls/vpp_channel.go index dbd9b9ead1..c5b19d08dd 100644 --- a/plugins/vpp/aclplugin/vppcalls/vpp_channel.go +++ b/plugins/vpp/aclplugin/vppcalls/vpp_channel.go @@ -23,4 +23,6 @@ type VPPChannel interface { SendRequest(msg govppapi.Message) *govppapi.RequestCtx SendMultiRequest(msg govppapi.Message) *govppapi.MultiRequestCtx + + CheckMessageCompatibility(messages ...govppapi.Message) error } diff --git a/plugins/vpp/aclplugin/vppdump/doc.go b/plugins/vpp/aclplugin/vppdump/doc.go deleted file 
mode 100644 index c7b54c339d..0000000000 --- a/plugins/vpp/aclplugin/vppdump/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package vppdump provides helpers to dump ACLs configured in VPP - per -// interface and total. -package vppdump diff --git a/plugins/vpp/aclplugin/vppdump/vpp_channel.go b/plugins/vpp/aclplugin/vppdump/vpp_channel.go deleted file mode 100644 index e3b153a4c4..0000000000 --- a/plugins/vpp/aclplugin/vppdump/vpp_channel.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package vppdump - -import ( - govppapi "git.fd.io/govpp.git/api" -) - -// VPPChannel is interface for send request to VPP channel -type VPPChannel interface { - SendRequest(msg govppapi.Message) *govppapi.RequestCtx - - SendMultiRequest(msg govppapi.Message) *govppapi.MultiRequestCtx - - CheckMessageCompatibility(messages ...govppapi.Message) error -} diff --git a/plugins/vpp/plugin_impl_vpp.go b/plugins/vpp/plugin_impl_vpp.go index e92593c490..4414960ee1 100644 --- a/plugins/vpp/plugin_impl_vpp.go +++ b/plugins/vpp/plugin_impl_vpp.go @@ -129,9 +129,9 @@ type Plugin struct { omittedPrefixes []string // list of keys which won't be resynced // From config file + stopwatch *measure.Stopwatch ifMtu uint32 resyncStrategy string - enableStopwatch bool // Common statusCheckReg bool @@ -422,7 +422,7 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // Interface configurator plugin.ifVppNotifChan = make(chan govppapi.Message, 100) plugin.ifConfigurator = &ifplugin.InterfaceConfigurator{} - if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, plugin.enableStopwatch); err != nil { + if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, false); err != nil { return err } plugin.Log.Debug("ifConfigurator Initialized") @@ -447,21 +447,21 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // BFD configurator plugin.bfdConfigurator = &ifplugin.BFDConfigurator{} - if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("bfdConfigurator Initialized") // STN configurator plugin.stnConfigurator = &ifplugin.StnConfigurator{} - if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("stnConfigurator Initialized") // NAT configurator plugin.natConfigurator = &ifplugin.NatConfigurator{} - if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, 
plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("natConfigurator Initialized") @@ -474,7 +474,7 @@ func (plugin *Plugin) initIPSec(ctx context.Context) (err error) { // IPSec configurator plugin.ipSecConfigurator = &ipsecplugin.IPSecConfigurator{} - if err = plugin.ipSecConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err = plugin.ipSecConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } @@ -487,7 +487,7 @@ func (plugin *Plugin) initACL(ctx context.Context) error { // ACL configurator plugin.aclConfigurator = &aclplugin.ACLConfigurator{} - err := plugin.aclConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch) + err := plugin.aclConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.stopwatch) if err != nil { return err } @@ -502,7 +502,7 @@ func (plugin *Plugin) initL2(ctx context.Context) error { // Bridge domain configurator plugin.bdVppNotifChan = make(chan l2plugin.BridgeDomainStateMessage, 100) plugin.bdConfigurator = &l2plugin.BDConfigurator{} - err := plugin.bdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdVppNotifChan, plugin.enableStopwatch) + err := plugin.bdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdVppNotifChan, false) if err != nil { return err } @@ -527,7 +527,7 @@ func (plugin *Plugin) initL2(ctx context.Context) error { // L2 FIB configurator plugin.fibConfigurator = &l2plugin.FIBConfigurator{} - err = plugin.fibConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdIndexes, plugin.enableStopwatch) + err = plugin.fibConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdIndexes, false) if err != nil { return err } @@ -535,7 +535,7 @@ func (plugin *Plugin) initL2(ctx context.Context) error { // L2 cross connect plugin.xcConfigurator = &l2plugin.XConnectConfigurator{} - if err := plugin.xcConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.xcConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("xcConfigurator Initialized") @@ -548,21 +548,21 @@ func (plugin *Plugin) initL3(ctx context.Context) error { // ARP configurator plugin.arpConfigurator = &l3plugin.ArpConfigurator{} - if err := plugin.arpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.arpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("arpConfigurator Initialized") // Proxy ARP configurator plugin.proxyArpConfigurator = &l3plugin.ProxyArpConfigurator{} - if err := plugin.proxyArpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.proxyArpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("proxyArpConfigurator Initialized") // Route configurator plugin.routeConfigurator = &l3plugin.RouteConfigurator{} - if err := plugin.routeConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.routeConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("routeConfigurator Initialized") @@ -575,7 +575,7 @@ 
func (plugin *Plugin) initL4(ctx context.Context) error { // Application namespace conifgurator plugin.appNsConfigurator = &l4plugin.AppNsConfigurator{} - if err := plugin.appNsConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.appNsConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { return err } plugin.Log.Debug("l4Configurator Initialized") @@ -590,7 +590,7 @@ func (plugin *Plugin) initSR(ctx context.Context) (err error) { srLogger := plugin.Log.NewLogger("-sr-plugin") var stopwatch *measure.Stopwatch - if plugin.enableStopwatch { + if false { stopwatch = measure.NewStopwatch("SRConfigurator", srLogger) } // configuring configurators @@ -640,8 +640,9 @@ func (plugin *Plugin) fromConfigFile() { plugin.ifMtu = config.Mtu plugin.Log.Infof("Default MTU set to %v", plugin.ifMtu) } - plugin.enableStopwatch = config.Stopwatch - if plugin.enableStopwatch { + + if config.Stopwatch { + plugin.stopwatch = measure.NewStopwatch("VppPlugin", plugin.Log) plugin.Log.Infof("stopwatch enabled for %v", plugin.PluginName) } else { plugin.Log.Infof("stopwatch disabled for %v", plugin.PluginName) From 5f2478fed93264660d19bc300e459fedf7b51d15 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 4 Jul 2018 14:50:26 +0200 Subject: [PATCH 002/174] fixed typo Signed-off-by: Vladimir Lavor --- plugins/vpp/aclplugin/vppcalls/api_vppcalls.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go index 1c197c36a8..679378cc1e 100644 --- a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go @@ -73,7 +73,7 @@ type AclVppAPI interface { DumpInterfaces() ([]*acl_api.ACLInterfaceListDetails, []*acl_api.MacipACLInterfaceListDetails, error) } -// netLinkHandler is accessor for acl-related vppcalls methods +// aclVppHandler is accessor for acl-related vppcalls methods type aclVppHandler struct { stopwatch *measure.Stopwatch callsChannel VPPChannel From 74853c170b4a3e7f96a77000f7ba0d46ea058a44 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Tue, 10 Jul 2018 09:04:46 +0200 Subject: [PATCH 003/174] resolve comments Signed-off-by: Vladimir Lavor --- plugins/vpp/aclplugin/acl_config.go | 10 ++--- plugins/vpp/aclplugin/data_resync.go | 11 +++-- .../vpp/aclplugin/vppcalls/dump_vppcalls.go | 1 - .../aclplugin/vppcalls/interfaces_vppcalls.go | 40 ++++++++++--------- plugins/vpp/plugin_impl_vpp.go | 36 ++++++++--------- 5 files changed, 50 insertions(+), 48 deletions(-) diff --git a/plugins/vpp/aclplugin/acl_config.go b/plugins/vpp/aclplugin/acl_config.go index 98b5031a9b..ec26533892 100644 --- a/plugins/vpp/aclplugin/acl_config.go +++ b/plugins/vpp/aclplugin/acl_config.go @@ -76,7 +76,7 @@ type ACLConfigurator struct { // Init goroutines, channels and mappings. 
func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, - stopwatch *measure.Stopwatch) (err error) { + enableStopwatch bool) (err error) { // Logger plugin.log = logger.NewLogger("-acl-plugin") plugin.log.Infof("Initializing ACL configurator") @@ -96,8 +96,11 @@ func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppm return err } + // Configurator-wide stopwatch instance + plugin.stopwatch = measure.NewStopwatch("ACL-configurator", plugin.log) + // ACL binary api handler - plugin.aclHandler = vppcalls.NewAclVppHandler(plugin.vppChan, plugin.vppDumpChan, stopwatch) + plugin.aclHandler = vppcalls.NewAclVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.stopwatch) // Message compatibility if err = plugin.vppChan.CheckMessageCompatibility(vppcalls.AclMessages...); err != nil { @@ -105,9 +108,6 @@ func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppm return err } - // Configurator-wide stopwatch instance - plugin.stopwatch = stopwatch - // Get VPP ACL plugin version var aclVersion string if aclVersion, err = plugin.aclHandler.GetAclPluginVersion(); err != nil { diff --git a/plugins/vpp/aclplugin/data_resync.go b/plugins/vpp/aclplugin/data_resync.go index 1d11d813b5..c4530b28c6 100644 --- a/plugins/vpp/aclplugin/data_resync.go +++ b/plugins/vpp/aclplugin/data_resync.go @@ -15,9 +15,6 @@ package aclplugin import ( - "time" - - acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) @@ -25,9 +22,11 @@ import ( func (plugin *ACLConfigurator) Resync(nbACLs []*acl.AccessLists_Acl) error { plugin.log.Debug("Resync ACLs started") // Calculate and log acl resync. - defer func(t time.Time) { - plugin.stopwatch.TimeLog(acl_api.MacipACLDel{}).LogTimeEntry(time.Since(t)) - }(time.Now()) + defer func() { + if plugin.stopwatch != nil { + plugin.stopwatch.PrintLog() + } + }() // Re-initialize cache plugin.clearMapping() diff --git a/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go index ac42ab0a93..1e1ac07168 100644 --- a/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go @@ -56,7 +56,6 @@ type ACLToInterface struct { } func (handler *aclVppHandler) DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error) { - ruleIPData := make(map[ACLIdentifier][]*acl.AccessLists_Acl_Rule) // get all ACLs with IP ruleData diff --git a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go index 5a95cb7f2c..ae47c9710e 100644 --- a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go @@ -67,10 +67,11 @@ func (handler *aclVppHandler) SetMacIPAclToInterface(aclIndex uint32, ifIndices // Measure MacipACLInterfaceAddDel time start := time.Now() - req := &acl_api.MacipACLInterfaceAddDel{} - req.ACLIndex = aclIndex - req.IsAdd = 1 - req.SwIfIndex = ingressIfIdx + req := &acl_api.MacipACLInterfaceAddDel{ + ACLIndex: aclIndex, + IsAdd: 1, + SwIfIndex: ingressIfIdx, + } reply := &acl_api.MacipACLInterfaceAddDelReply{} @@ -97,10 +98,11 @@ func (handler *aclVppHandler) RemoveMacIPIngressACLFromInterfaces(removedACLInde // Measure MacipACLInterfaceAddDel time. 
start := time.Now() - req := &acl_api.MacipACLInterfaceAddDel{} - req.ACLIndex = removedACLIndex - req.SwIfIndex = ifIdx - req.IsAdd = 0 + req := &acl_api.MacipACLInterfaceAddDel{ + ACLIndex: removedACLIndex, + SwIfIndex: ifIdx, + IsAdd: 0, + } reply := &acl_api.MacipACLInterfaceAddDelReply{} @@ -159,11 +161,12 @@ func (handler *aclVppHandler) requestSetACLToInterfaces(logicalReq *ACLInterface // Measure ACLInterfaceSetACLList time start := time.Now() - msg := &acl_api.ACLInterfaceSetACLList{} - msg.Acls = ACLs - msg.Count = uint8(len(ACLs)) - msg.SwIfIndex = aclIfIdx - msg.NInput = nInput + msg := &acl_api.ACLInterfaceSetACLList{ + Acls: ACLs, + Count: uint8(len(ACLs)), + SwIfIndex: aclIfIdx, + NInput: nInput, + } reply := &acl_api.ACLInterfaceSetACLListReply{} err = handler.callsChannel.SendRequest(msg).ReceiveReply(reply) @@ -217,11 +220,12 @@ func (handler *aclVppHandler) requestRemoveInterfacesFromACL(logicalReq *ACLInte // Measure ACLInterfaceSetACLList time start := time.Now() - msg := &acl_api.ACLInterfaceSetACLList{} - msg.Acls = ACLs - msg.Count = uint8(len(ACLs)) - msg.SwIfIndex = aclIfIdx - msg.NInput = nInput + msg := &acl_api.ACLInterfaceSetACLList{ + Acls: ACLs, + Count: uint8(len(ACLs)), + SwIfIndex: aclIfIdx, + NInput: nInput, + } reply := &acl_api.ACLInterfaceSetACLListReply{} err = handler.callsChannel.SendRequest(msg).ReceiveReply(reply) diff --git a/plugins/vpp/plugin_impl_vpp.go b/plugins/vpp/plugin_impl_vpp.go index 4414960ee1..22cfdd1057 100644 --- a/plugins/vpp/plugin_impl_vpp.go +++ b/plugins/vpp/plugin_impl_vpp.go @@ -129,7 +129,7 @@ type Plugin struct { omittedPrefixes []string // list of keys which won't be resynced // From config file - stopwatch *measure.Stopwatch + enableStopwatch bool ifMtu uint32 resyncStrategy string @@ -422,7 +422,7 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // Interface configurator plugin.ifVppNotifChan = make(chan govppapi.Message, 100) plugin.ifConfigurator = &ifplugin.InterfaceConfigurator{} - if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, false); err != nil { + if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("ifConfigurator Initialized") @@ -447,21 +447,21 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // BFD configurator plugin.bfdConfigurator = &ifplugin.BFDConfigurator{} - if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("bfdConfigurator Initialized") // STN configurator plugin.stnConfigurator = &ifplugin.StnConfigurator{} - if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("stnConfigurator Initialized") // NAT configurator plugin.natConfigurator = &ifplugin.NatConfigurator{} - if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("natConfigurator Initialized") @@ 
-474,7 +474,7 @@ func (plugin *Plugin) initIPSec(ctx context.Context) (err error) { // IPSec configurator plugin.ipSecConfigurator = &ipsecplugin.IPSecConfigurator{} - if err = plugin.ipSecConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err = plugin.ipSecConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } @@ -487,7 +487,7 @@ func (plugin *Plugin) initACL(ctx context.Context) error { // ACL configurator plugin.aclConfigurator = &aclplugin.ACLConfigurator{} - err := plugin.aclConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.stopwatch) + err := plugin.aclConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch) if err != nil { return err } @@ -502,7 +502,7 @@ func (plugin *Plugin) initL2(ctx context.Context) error { // Bridge domain configurator plugin.bdVppNotifChan = make(chan l2plugin.BridgeDomainStateMessage, 100) plugin.bdConfigurator = &l2plugin.BDConfigurator{} - err := plugin.bdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdVppNotifChan, false) + err := plugin.bdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdVppNotifChan, plugin.enableStopwatch) if err != nil { return err } @@ -527,7 +527,7 @@ func (plugin *Plugin) initL2(ctx context.Context) error { // L2 FIB configurator plugin.fibConfigurator = &l2plugin.FIBConfigurator{} - err = plugin.fibConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdIndexes, false) + err = plugin.fibConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdIndexes, plugin.enableStopwatch) if err != nil { return err } @@ -535,7 +535,7 @@ func (plugin *Plugin) initL2(ctx context.Context) error { // L2 cross connect plugin.xcConfigurator = &l2plugin.XConnectConfigurator{} - if err := plugin.xcConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.xcConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("xcConfigurator Initialized") @@ -548,21 +548,21 @@ func (plugin *Plugin) initL3(ctx context.Context) error { // ARP configurator plugin.arpConfigurator = &l3plugin.ArpConfigurator{} - if err := plugin.arpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.arpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("arpConfigurator Initialized") // Proxy ARP configurator plugin.proxyArpConfigurator = &l3plugin.ProxyArpConfigurator{} - if err := plugin.proxyArpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.proxyArpConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("proxyArpConfigurator Initialized") // Route configurator plugin.routeConfigurator = &l3plugin.RouteConfigurator{} - if err := plugin.routeConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.routeConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("routeConfigurator Initialized") @@ -575,7 +575,7 @@ func (plugin *Plugin) initL4(ctx context.Context) error { // Application namespace conifgurator 
plugin.appNsConfigurator = &l4plugin.AppNsConfigurator{} - if err := plugin.appNsConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, false); err != nil { + if err := plugin.appNsConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("l4Configurator Initialized") @@ -590,7 +590,7 @@ func (plugin *Plugin) initSR(ctx context.Context) (err error) { srLogger := plugin.Log.NewLogger("-sr-plugin") var stopwatch *measure.Stopwatch - if false { + if plugin.enableStopwatch { stopwatch = measure.NewStopwatch("SRConfigurator", srLogger) } // configuring configurators @@ -642,10 +642,10 @@ func (plugin *Plugin) fromConfigFile() { } if config.Stopwatch { - plugin.stopwatch = measure.NewStopwatch("VppPlugin", plugin.Log) - plugin.Log.Infof("stopwatch enabled for %v", plugin.PluginName) + plugin.enableStopwatch = true + plugin.Log.Info("stopwatch enabled for VPP plugins") } else { - plugin.Log.Infof("stopwatch disabled for %v", plugin.PluginName) + plugin.Log.Info("stopwatch disabled VPP plugins") } // return skip (if set) or value from config plugin.resyncStrategy = plugin.resolveResyncStrategy(config.Strategy) From 701ec31ac38bc8871d3852a2e38d6c9782a8b8c9 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Tue, 10 Jul 2018 09:07:41 +0200 Subject: [PATCH 004/174] fix test Signed-off-by: Vladimir Lavor --- plugins/vpp/aclplugin/acl_config_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/vpp/aclplugin/acl_config_test.go b/plugins/vpp/aclplugin/acl_config_test.go index f45187fa77..cd716b5cf2 100644 --- a/plugins/vpp/aclplugin/acl_config_test.go +++ b/plugins/vpp/aclplugin/acl_config_test.go @@ -87,7 +87,7 @@ func TestAclConfiguratorInit(t *testing.T) { ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{}) // Test init - err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, nil) + err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, false) Expect(err).To(BeNil()) err = plugin.Close() Expect(err).To(BeNil()) @@ -446,7 +446,7 @@ func aclTestSetup(t *testing.T, createIfs bool) (*vppcallmock.TestCtx, *core.Con // Configurator ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{1, 0}) plugin := &aclplugin.ACLConfigurator{} - err = plugin.Init(log, connection, ifIndexes, nil) + err = plugin.Init(log, connection, ifIndexes, false) Expect(err).To(BeNil()) return ctx, connection, plugin From b6d4565b2df531b2416b3d54d7b5625ca3ae67d1 Mon Sep 17 00:00:00 2001 From: Filip Gschwandtner Date: Thu, 12 Jul 2018 18:21:29 +0200 Subject: [PATCH 005/174] upgraded vpp version to get big VPP API change for SR functionality Signed-off-by: Filip Gschwandtner --- Makefile | 1 + plugins/vpp/binapi/fixapi-sr.patch | 16 ++ plugins/vpp/binapi/sr/pkgreflect.go | 10 + plugins/vpp/binapi/sr/sr.go | 399 +++++++++++++++++++++----- plugins/vpp/srplugin/vppcalls/srv6.go | 74 +++-- vpp.env | 2 +- 6 files changed, 400 insertions(+), 102 deletions(-) create mode 100644 plugins/vpp/binapi/fixapi-sr.patch diff --git a/Makefile b/Makefile index 1db48135fe..e97a1c145d 100644 --- a/Makefile +++ b/Makefile @@ -136,6 +136,7 @@ generate-binapi: get-binapi-generators cd plugins/vpp/binapi/vxlan && pkgreflect @echo "=> applying fix patch" patch -p1 -i plugins/vpp/binapi/fixapi.patch + patch -p1 -i plugins/vpp/binapi/fixapi-sr.patch verify-binapi: @echo "=> verifying binary api" diff --git a/plugins/vpp/binapi/fixapi-sr.patch 
b/plugins/vpp/binapi/fixapi-sr.patch new file mode 100644 index 0000000000..230eb66a91 --- /dev/null +++ b/plugins/vpp/binapi/fixapi-sr.patch @@ -0,0 +1,16 @@ +diff --git a/plugins/vpp/binapi/sr/sr.go b/plugins/vpp/binapi/sr/sr.go +index 9325fbb..15780c3 100644 +--- a/plugins/vpp/binapi/sr/sr.go ++++ b/plugins/vpp/binapi/sr/sr.go +@@ -51,9 +51,9 @@ func (*Srv6Sid) GetCrcString() string { + // } + // + type Srv6SidList struct { +- NumSids uint8 ++ NumSids uint8 `struc:"sizeof=Sids"` + Weight uint32 +- Sids []Srv6Sid `struc:"[16]Srv6Sid"` ++ Sids []Srv6Sid + } + + func (*Srv6SidList) GetTypeName() string { diff --git a/plugins/vpp/binapi/sr/pkgreflect.go b/plugins/vpp/binapi/sr/pkgreflect.go index 95792a035d..3997d43771 100644 --- a/plugins/vpp/binapi/sr/pkgreflect.go +++ b/plugins/vpp/binapi/sr/pkgreflect.go @@ -10,6 +10,8 @@ var Types = map[string]reflect.Type{ "SrLocalsidAddDelReply": reflect.TypeOf((*SrLocalsidAddDelReply)(nil)).Elem(), "SrLocalsidsDetails": reflect.TypeOf((*SrLocalsidsDetails)(nil)).Elem(), "SrLocalsidsDump": reflect.TypeOf((*SrLocalsidsDump)(nil)).Elem(), + "SrPoliciesDetails": reflect.TypeOf((*SrPoliciesDetails)(nil)).Elem(), + "SrPoliciesDump": reflect.TypeOf((*SrPoliciesDump)(nil)).Elem(), "SrPolicyAdd": reflect.TypeOf((*SrPolicyAdd)(nil)).Elem(), "SrPolicyAddReply": reflect.TypeOf((*SrPolicyAddReply)(nil)).Elem(), "SrPolicyDel": reflect.TypeOf((*SrPolicyDel)(nil)).Elem(), @@ -20,6 +22,10 @@ var Types = map[string]reflect.Type{ "SrSetEncapSourceReply": reflect.TypeOf((*SrSetEncapSourceReply)(nil)).Elem(), "SrSteeringAddDel": reflect.TypeOf((*SrSteeringAddDel)(nil)).Elem(), "SrSteeringAddDelReply": reflect.TypeOf((*SrSteeringAddDelReply)(nil)).Elem(), + "SrSteeringPolDetails": reflect.TypeOf((*SrSteeringPolDetails)(nil)).Elem(), + "SrSteeringPolDump": reflect.TypeOf((*SrSteeringPolDump)(nil)).Elem(), + "Srv6Sid": reflect.TypeOf((*Srv6Sid)(nil)).Elem(), + "Srv6SidList": reflect.TypeOf((*Srv6SidList)(nil)).Elem(), } var Functions = map[string]reflect.Value{ @@ -27,6 +33,8 @@ var Functions = map[string]reflect.Value{ "NewSrLocalsidAddDelReply": reflect.ValueOf(NewSrLocalsidAddDelReply), "NewSrLocalsidsDetails": reflect.ValueOf(NewSrLocalsidsDetails), "NewSrLocalsidsDump": reflect.ValueOf(NewSrLocalsidsDump), + "NewSrPoliciesDetails": reflect.ValueOf(NewSrPoliciesDetails), + "NewSrPoliciesDump": reflect.ValueOf(NewSrPoliciesDump), "NewSrPolicyAdd": reflect.ValueOf(NewSrPolicyAdd), "NewSrPolicyAddReply": reflect.ValueOf(NewSrPolicyAddReply), "NewSrPolicyDel": reflect.ValueOf(NewSrPolicyDel), @@ -37,6 +45,8 @@ var Functions = map[string]reflect.Value{ "NewSrSetEncapSourceReply": reflect.ValueOf(NewSrSetEncapSourceReply), "NewSrSteeringAddDel": reflect.ValueOf(NewSrSteeringAddDel), "NewSrSteeringAddDelReply": reflect.ValueOf(NewSrSteeringAddDelReply), + "NewSrSteeringPolDetails": reflect.ValueOf(NewSrSteeringPolDetails), + "NewSrSteeringPolDump": reflect.ValueOf(NewSrSteeringPolDump), } var Variables = map[string]reflect.Value{ diff --git a/plugins/vpp/binapi/sr/sr.go b/plugins/vpp/binapi/sr/sr.go index 127f163cb4..15780c332f 100644 --- a/plugins/vpp/binapi/sr/sr.go +++ b/plugins/vpp/binapi/sr/sr.go @@ -5,8 +5,66 @@ package sr import "git.fd.io/govpp.git/api" +// Srv6Sid represents the VPP binary API data type 'srv6_sid'. 
+// Generated from '/usr/share/vpp/api/sr.api.json', line 577: +// +// "srv6_sid", +// [ +// "u8", +// "addr", +// 16 +// ], +// { +// "crc": "0x6ee67284" +// } +// +type Srv6Sid struct { + Addr []byte `struc:"[16]byte"` +} + +func (*Srv6Sid) GetTypeName() string { + return "srv6_sid" +} +func (*Srv6Sid) GetCrcString() string { + return "6ee67284" +} + +// Srv6SidList represents the VPP binary API data type 'srv6_sid_list'. +// Generated from '/usr/share/vpp/api/sr.api.json', line 588: +// +// "srv6_sid_list", +// [ +// "u8", +// "num_sids" +// ], +// [ +// "u32", +// "weight" +// ], +// [ +// "vl_api_srv6_sid_t", +// "sids", +// 16 +// ], +// { +// "crc": "0x4066af74" +// } +// +type Srv6SidList struct { + NumSids uint8 `struc:"sizeof=Sids"` + Weight uint32 + Sids []Srv6Sid +} + +func (*Srv6SidList) GetTypeName() string { + return "srv6_sid_list" +} +func (*Srv6SidList) GetCrcString() string { + return "4066af74" +} + // SrIP6Address represents the VPP binary API data type 'sr_ip6_address'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 455: +// Generated from '/usr/share/vpp/api/sr.api.json', line 607: // // "sr_ip6_address", // [ @@ -50,9 +108,8 @@ func (*SrIP6Address) GetCrcString() string { // "is_del" // ], // [ -// "u8", -// "localsid_addr", -// 16 +// "vl_api_srv6_sid_t", +// "localsid" // ], // [ // "u8", @@ -76,22 +133,28 @@ func (*SrIP6Address) GetCrcString() string { // ], // [ // "u8", -// "nh_addr", +// "nh_addr6", // 16 // ], +// [ +// "u8", +// "nh_addr4", +// 4 +// ], // { -// "crc": "0xa833a891" +// "crc": "0x20d478a0" // } // type SrLocalsidAddDel struct { - IsDel uint8 - LocalsidAddr []byte `struc:"[16]byte"` - EndPsp uint8 - Behavior uint8 - SwIfIndex uint32 - VlanIndex uint32 - FibTable uint32 - NhAddr []byte `struc:"[16]byte"` + IsDel uint8 + Localsid Srv6Sid + EndPsp uint8 + Behavior uint8 + SwIfIndex uint32 + VlanIndex uint32 + FibTable uint32 + NhAddr6 []byte `struc:"[16]byte"` + NhAddr4 []byte `struc:"[4]byte"` } func (*SrLocalsidAddDel) GetMessageName() string { @@ -101,14 +164,14 @@ func (*SrLocalsidAddDel) GetMessageType() api.MessageType { return api.RequestMessage } func (*SrLocalsidAddDel) GetCrcString() string { - return "a833a891" + return "20d478a0" } func NewSrLocalsidAddDel() api.Message { return &SrLocalsidAddDel{} } // SrLocalsidAddDelReply represents the VPP binary API message 'sr_localsid_add_del_reply'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 56: +// Generated from '/usr/share/vpp/api/sr.api.json', line 60: // // "sr_localsid_add_del_reply", // [ @@ -145,7 +208,7 @@ func NewSrLocalsidAddDelReply() api.Message { } // SrPolicyAdd represents the VPP binary API message 'sr_policy_add'. 
-// Generated from '/usr/share/vpp/api/sr.api.json', line 74: +// Generated from '/usr/share/vpp/api/sr.api.json', line 78: // // "sr_policy_add", // [ @@ -182,27 +245,20 @@ func NewSrLocalsidAddDelReply() api.Message { // "fib_table" // ], // [ -// "u8", -// "n_segments" -// ], -// [ -// "vl_api_sr_ip6_address_t", -// "segments", -// 0, -// "n_segments" +// "vl_api_srv6_sid_list_t", +// "sids" // ], // { -// "crc": "0x6869ac7f" +// "crc": "0xa1676c1f" // } // type SrPolicyAdd struct { - BsidAddr []byte `struc:"[16]byte"` - Weight uint32 - IsEncap uint8 - Type uint8 - FibTable uint32 - NSegments uint8 `struc:"sizeof=Segments"` - Segments []SrIP6Address + BsidAddr []byte `struc:"[16]byte"` + Weight uint32 + IsEncap uint8 + Type uint8 + FibTable uint32 + Sids Srv6SidList } func (*SrPolicyAdd) GetMessageName() string { @@ -212,14 +268,14 @@ func (*SrPolicyAdd) GetMessageType() api.MessageType { return api.RequestMessage } func (*SrPolicyAdd) GetCrcString() string { - return "6869ac7f" + return "a1676c1f" } func NewSrPolicyAdd() api.Message { return &SrPolicyAdd{} } // SrPolicyAddReply represents the VPP binary API message 'sr_policy_add_reply'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 123: +// Generated from '/usr/share/vpp/api/sr.api.json', line 121: // // "sr_policy_add_reply", // [ @@ -256,7 +312,7 @@ func NewSrPolicyAddReply() api.Message { } // SrPolicyMod represents the VPP binary API message 'sr_policy_mod'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 141: +// Generated from '/usr/share/vpp/api/sr.api.json', line 139: // // "sr_policy_mod", // [ @@ -297,17 +353,11 @@ func NewSrPolicyAddReply() api.Message { // "weight" // ], // [ -// "u8", -// "n_segments" -// ], -// [ -// "vl_api_sr_ip6_address_t", -// "segments", -// 0, -// "n_segments" +// "vl_api_srv6_sid_list_t", +// "sids" // ], // { -// "crc": "0xad91f4b2" +// "crc": "0x51252136" // } // type SrPolicyMod struct { @@ -317,8 +367,7 @@ type SrPolicyMod struct { Operation uint8 SlIndex uint32 Weight uint32 - NSegments uint8 `struc:"sizeof=Segments"` - Segments []SrIP6Address + Sids Srv6SidList } func (*SrPolicyMod) GetMessageName() string { @@ -328,14 +377,14 @@ func (*SrPolicyMod) GetMessageType() api.MessageType { return api.RequestMessage } func (*SrPolicyMod) GetCrcString() string { - return "ad91f4b2" + return "51252136" } func NewSrPolicyMod() api.Message { return &SrPolicyMod{} } // SrPolicyModReply represents the VPP binary API message 'sr_policy_mod_reply'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 194: +// Generated from '/usr/share/vpp/api/sr.api.json', line 186: // // "sr_policy_mod_reply", // [ @@ -372,7 +421,7 @@ func NewSrPolicyModReply() api.Message { } // SrPolicyDel represents the VPP binary API message 'sr_policy_del'. 
-// Generated from '/usr/share/vpp/api/sr.api.json', line 212: +// Generated from '/usr/share/vpp/api/sr.api.json', line 204: // // "sr_policy_del", // [ @@ -388,20 +437,19 @@ func NewSrPolicyModReply() api.Message { // "context" // ], // [ -// "u8", -// "bsid_addr", -// 16 +// "vl_api_srv6_sid_t", +// "bsid_addr" // ], // [ // "u32", // "sr_policy_index" // ], // { -// "crc": "0x0388e561" +// "crc": "0x168e1a98" // } // type SrPolicyDel struct { - BsidAddr []byte `struc:"[16]byte"` + BsidAddr Srv6Sid SrPolicyIndex uint32 } @@ -412,14 +460,14 @@ func (*SrPolicyDel) GetMessageType() api.MessageType { return api.RequestMessage } func (*SrPolicyDel) GetCrcString() string { - return "0388e561" + return "168e1a98" } func NewSrPolicyDel() api.Message { return &SrPolicyDel{} } // SrPolicyDelReply represents the VPP binary API message 'sr_policy_del_reply'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 239: +// Generated from '/usr/share/vpp/api/sr.api.json', line 230: // // "sr_policy_del_reply", // [ @@ -456,7 +504,7 @@ func NewSrPolicyDelReply() api.Message { } // SrSetEncapSource represents the VPP binary API message 'sr_set_encap_source'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 257: +// Generated from '/usr/share/vpp/api/sr.api.json', line 248: // // "sr_set_encap_source", // [ @@ -498,7 +546,7 @@ func NewSrSetEncapSource() api.Message { } // SrSetEncapSourceReply represents the VPP binary API message 'sr_set_encap_source_reply'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 280: +// Generated from '/usr/share/vpp/api/sr.api.json', line 271: // // "sr_set_encap_source_reply", // [ @@ -535,7 +583,7 @@ func NewSrSetEncapSourceReply() api.Message { } // SrSteeringAddDel represents the VPP binary API message 'sr_steering_add_del'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 298: +// Generated from '/usr/share/vpp/api/sr.api.json', line 289: // // "sr_steering_add_del", // [ @@ -613,7 +661,7 @@ func NewSrSteeringAddDel() api.Message { } // SrSteeringAddDelReply represents the VPP binary API message 'sr_steering_add_del_reply'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 350: +// Generated from '/usr/share/vpp/api/sr.api.json', line 341: // // "sr_steering_add_del_reply", // [ @@ -650,7 +698,7 @@ func NewSrSteeringAddDelReply() api.Message { } // SrLocalsidsDump represents the VPP binary API message 'sr_localsids_dump'. -// Generated from '/usr/share/vpp/api/sr.api.json', line 368: +// Generated from '/usr/share/vpp/api/sr.api.json', line 359: // // "sr_localsids_dump", // [ @@ -686,7 +734,7 @@ func NewSrLocalsidsDump() api.Message { } // SrLocalsidsDetails represents the VPP binary API message 'sr_localsids_details'. 
-// Generated from '/usr/share/vpp/api/sr.api.json', line 386: +// Generated from '/usr/share/vpp/api/sr.api.json', line 377: // // "sr_localsids_details", // [ @@ -698,9 +746,8 @@ func NewSrLocalsidsDump() api.Message { // "context" // ], // [ -// "u8", -// "address", -// 16 +// "vl_api_srv6_sid_t", +// "addr" // ], // [ // "u8", @@ -715,24 +762,35 @@ func NewSrLocalsidsDump() api.Message { // "fib_table" // ], // [ +// "u32", +// "vlan_index" +// ], +// [ // "u8", -// "xconnect_next_hop", +// "xconnect_nh_addr6", // 16 // ], // [ +// "u8", +// "xconnect_nh_addr4", +// 4 +// ], +// [ // "u32", // "xconnect_iface_or_vrf_table" // ], // { -// "crc": "0xb6556a9c" +// "crc": "0x7ff35765" // } // type SrLocalsidsDetails struct { - Address []byte `struc:"[16]byte"` + Addr Srv6Sid EndPsp uint8 Behavior uint16 FibTable uint32 - XconnectNextHop []byte `struc:"[16]byte"` + VlanIndex uint32 + XconnectNhAddr6 []byte `struc:"[16]byte"` + XconnectNhAddr4 []byte `struc:"[4]byte"` XconnectIfaceOrVrfTable uint32 } @@ -743,8 +801,207 @@ func (*SrLocalsidsDetails) GetMessageType() api.MessageType { return api.ReplyMessage } func (*SrLocalsidsDetails) GetCrcString() string { - return "b6556a9c" + return "7ff35765" } func NewSrLocalsidsDetails() api.Message { return &SrLocalsidsDetails{} } + +// SrPoliciesDump represents the VPP binary API message 'sr_policies_dump'. +// Generated from '/usr/share/vpp/api/sr.api.json', line 425: +// +// "sr_policies_dump", +// [ +// "u16", +// "_vl_msg_id" +// ], +// [ +// "u32", +// "client_index" +// ], +// [ +// "u32", +// "context" +// ], +// { +// "crc": "0x51077d14" +// } +// +type SrPoliciesDump struct { +} + +func (*SrPoliciesDump) GetMessageName() string { + return "sr_policies_dump" +} +func (*SrPoliciesDump) GetMessageType() api.MessageType { + return api.RequestMessage +} +func (*SrPoliciesDump) GetCrcString() string { + return "51077d14" +} +func NewSrPoliciesDump() api.Message { + return &SrPoliciesDump{} +} + +// SrPoliciesDetails represents the VPP binary API message 'sr_policies_details'. +// Generated from '/usr/share/vpp/api/sr.api.json', line 443: +// +// "sr_policies_details", +// [ +// "u16", +// "_vl_msg_id" +// ], +// [ +// "u32", +// "context" +// ], +// [ +// "vl_api_srv6_sid_t", +// "bsid" +// ], +// [ +// "u8", +// "type" +// ], +// [ +// "u8", +// "is_encap" +// ], +// [ +// "u32", +// "fib_table" +// ], +// [ +// "u8", +// "num_sid_lists" +// ], +// [ +// "vl_api_srv6_sid_list_t", +// "sid_lists", +// 0, +// "num_sid_lists" +// ], +// { +// "crc": "0xae838a76" +// } +// +type SrPoliciesDetails struct { + Bsid Srv6Sid + Type uint8 + IsEncap uint8 + FibTable uint32 + NumSidLists uint8 `struc:"sizeof=SidLists"` + SidLists []Srv6SidList +} + +func (*SrPoliciesDetails) GetMessageName() string { + return "sr_policies_details" +} +func (*SrPoliciesDetails) GetMessageType() api.MessageType { + return api.ReplyMessage +} +func (*SrPoliciesDetails) GetCrcString() string { + return "ae838a76" +} +func NewSrPoliciesDetails() api.Message { + return &SrPoliciesDetails{} +} + +// SrSteeringPolDump represents the VPP binary API message 'sr_steering_pol_dump'. 
+// Generated from '/usr/share/vpp/api/sr.api.json', line 483: +// +// "sr_steering_pol_dump", +// [ +// "u16", +// "_vl_msg_id" +// ], +// [ +// "u32", +// "client_index" +// ], +// [ +// "u32", +// "context" +// ], +// { +// "crc": "0x51077d14" +// } +// +type SrSteeringPolDump struct { +} + +func (*SrSteeringPolDump) GetMessageName() string { + return "sr_steering_pol_dump" +} +func (*SrSteeringPolDump) GetMessageType() api.MessageType { + return api.RequestMessage +} +func (*SrSteeringPolDump) GetCrcString() string { + return "51077d14" +} +func NewSrSteeringPolDump() api.Message { + return &SrSteeringPolDump{} +} + +// SrSteeringPolDetails represents the VPP binary API message 'sr_steering_pol_details'. +// Generated from '/usr/share/vpp/api/sr.api.json', line 501: +// +// "sr_steering_pol_details", +// [ +// "u16", +// "_vl_msg_id" +// ], +// [ +// "u32", +// "context" +// ], +// [ +// "u8", +// "traffic_type" +// ], +// [ +// "u32", +// "fib_table" +// ], +// [ +// "u8", +// "prefix_addr", +// 16 +// ], +// [ +// "u32", +// "mask_width" +// ], +// [ +// "u32", +// "sw_if_index" +// ], +// [ +// "vl_api_srv6_sid_t", +// "bsid" +// ], +// { +// "crc": "0x1c756f85" +// } +// +type SrSteeringPolDetails struct { + TrafficType uint8 + FibTable uint32 + PrefixAddr []byte `struc:"[16]byte"` + MaskWidth uint32 + SwIfIndex uint32 + Bsid Srv6Sid +} + +func (*SrSteeringPolDetails) GetMessageName() string { + return "sr_steering_pol_details" +} +func (*SrSteeringPolDetails) GetMessageType() api.MessageType { + return api.ReplyMessage +} +func (*SrSteeringPolDetails) GetCrcString() string { + return "1c756f85" +} +func NewSrSteeringPolDetails() api.Message { + return &SrSteeringPolDetails{} +} diff --git a/plugins/vpp/srplugin/vppcalls/srv6.go b/plugins/vpp/srplugin/vppcalls/srv6.go index 6f589f9fea..8f84cf90ff 100644 --- a/plugins/vpp/srplugin/vppcalls/srv6.go +++ b/plugins/vpp/srplugin/vppcalls/srv6.go @@ -111,8 +111,8 @@ func (calls *srv6Calls) addDelLocalSid(deletion bool, sidAddr net.IP, localSID * }(time.Now()) req := &sr.SrLocalsidAddDel{ - IsDel: boolToUint(deletion), - LocalsidAddr: []byte(sidAddr), + IsDel: boolToUint(deletion), + Localsid: sr.Srv6Sid{Addr: []byte(sidAddr)}, } if !deletion { req.FibTable = localSID.FibTableId // where to install localsid entry @@ -177,11 +177,15 @@ func (calls *srv6Calls) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.I return fmt.Errorf("for interface %v doesn't exist sw index", localSID.EndFunction_X.OutgoingInterface) } req.SwIfIndex = interfaceSwIndex - nhAddr, err := parseIPv6(localSID.EndFunction_X.NextHop) + nhAddr, err := parseIPv6(localSID.EndFunction_X.NextHop) // parses also ipv4 addresses but into ipv6 address form if err != nil { return err } - req.NhAddr = []byte(nhAddr) + if nhAddr4 := nhAddr.To4(); nhAddr4 != nil { // ipv4 address in ipv6 address form? 
+ req.NhAddr4 = nhAddr4 + } else { + req.NhAddr6 = []byte(nhAddr) + } } else if localSID.EndFunction_T != nil { req.Behavior = BehaviorT req.EndPsp = boolToUint(localSID.EndFunction_T.Psp) @@ -193,11 +197,15 @@ func (calls *srv6Calls) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.I return fmt.Errorf("for interface %v doesn't exist sw index", localSID.EndFunction_DX2.OutgoingInterface) } req.SwIfIndex = interfaceSwIndex - nhAddr, err := parseIPv6(localSID.EndFunction_DX2.NextHop) + nhAddr, err := parseIPv6(localSID.EndFunction_DX2.NextHop) // parses also ipv4 addresses but into ipv6 address form if err != nil { return err } - req.NhAddr = []byte(nhAddr) + if nhAddr4 := nhAddr.To4(); nhAddr4 != nil { // ipv4 address in ipv6 address form? + req.NhAddr4 = nhAddr4 + } else { + req.NhAddr6 = []byte(nhAddr) + } } else if localSID.EndFunction_DX4 != nil { req.Behavior = BehaviorDX4 interfaceSwIndex, _, exists := swIfIndex.LookupIdx(localSID.EndFunction_DX4.OutgoingInterface) @@ -212,7 +220,7 @@ func (calls *srv6Calls) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.I if nhAddr.To4() == nil { return fmt.Errorf("next hop of DX4 end function (%v) is not valid IPv4 address", localSID.EndFunction_DX4.NextHop) } - req.NhAddr = []byte(nhAddr) + req.NhAddr4 = []byte(nhAddr) } else if localSID.EndFunction_DX6 != nil { req.Behavior = BehaviorDX6 interfaceSwIndex, _, exists := swIfIndex.LookupIdx(localSID.EndFunction_DX6.OutgoingInterface) @@ -220,11 +228,11 @@ func (calls *srv6Calls) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.I return fmt.Errorf("for interface %v doesn't exist sw index", localSID.EndFunction_DX6.OutgoingInterface) } req.SwIfIndex = interfaceSwIndex - nhAddr, err := parseIPv6(localSID.EndFunction_DX6.NextHop) + nhAddr6, err := parseIPv6(localSID.EndFunction_DX6.NextHop) if err != nil { return err } - req.NhAddr = []byte(nhAddr) + req.NhAddr6 = []byte(nhAddr6) } else if localSID.EndFunction_DT4 != nil { req.Behavior = BehaviorDT4 } else if localSID.EndFunction_DT6 != nil { @@ -271,18 +279,17 @@ func (calls *srv6Calls) AddPolicy(bindingSid net.IP, policy *srv6.Policy, policy calls.stopwatch.TimeLog(sr.SrPolicyAdd{}).LogTimeEntry(time.Since(t)) }(time.Now()) - segmentsCount, segments, err := calls.convertNextSidList(policySegment.Segments) + sids, err := calls.convertPolicySegment(policySegment) if err != nil { return err } + // Note: Weight in sr.SrPolicyAdd is leftover from API changes that moved weight into sr.Srv6SidList (it is weight of sid list not of the whole policy) req := &sr.SrPolicyAdd{ - BsidAddr: []byte(bindingSid), - Weight: policySegment.Weight, - NSegments: segmentsCount, - Segments: segments, - IsEncap: boolToUint(policy.SrhEncapsulation), - Type: boolToUint(policy.SprayBehaviour), - FibTable: policy.FibTableId, + BsidAddr: []byte(bindingSid), + Sids: *sids, + IsEncap: boolToUint(policy.SrhEncapsulation), + Type: boolToUint(policy.SprayBehaviour), + FibTable: policy.FibTableId, } reply := &sr.SrPolicyAddReply{} @@ -307,7 +314,7 @@ func (calls *srv6Calls) DeletePolicy(bindingSid net.IP, vppChan govppapi.Channel }(time.Now()) req := &sr.SrPolicyDel{ - BsidAddr: []byte(bindingSid), // TODO add ability to define policy also by index (SrPolicyIndex) + BsidAddr: sr.Srv6Sid{Addr: []byte(bindingSid)}, // TODO add ability to define policy also by index (SrPolicyIndex) } reply := &sr.SrPolicyDelReply{} @@ -353,16 +360,15 @@ func (calls *srv6Calls) modPolicy(operation uint8, bindingSid net.IP, policy *sr 
calls.stopwatch.TimeLog(sr.SrPolicyMod{}).LogTimeEntry(time.Since(t)) }(time.Now()) - segmentsCount, segments, err := calls.convertNextSidList(policySegment.Segments) + sids, err := calls.convertPolicySegment(policySegment) if err != nil { return err } + // Note: Weight in sr.SrPolicyMod is leftover from API changes that moved weight into sr.Srv6SidList (it is weight of sid list not of the whole policy) req := &sr.SrPolicyMod{ BsidAddr: []byte(bindingSid), // TODO add ability to define policy also by index (SrPolicyIndex) Operation: operation, - Weight: policySegment.Weight, - NSegments: segmentsCount, - Segments: segments, + Sids: *sids, FibTable: policy.FibTableId, } if operation == DeleteSRList || operation == ModifyWeightOfSRList { @@ -380,23 +386,31 @@ func (calls *srv6Calls) modPolicy(operation uint8, bindingSid net.IP, policy *sr return nil } -func (calls *srv6Calls) convertNextSidList(nextSidList []string) (uint8, []sr.SrIP6Address, error) { - segments := make([]sr.SrIP6Address, 0) - for _, sid := range nextSidList { +func (calls *srv6Calls) convertPolicySegment(policySegment *srv6.PolicySegment) (*sr.Srv6SidList, error) { + segments := make([]sr.Srv6Sid, 0) + for _, sid := range policySegment.Segments { // parse to IPv6 address parserSid, err := parseIPv6(sid) if err != nil { - return 0, []sr.SrIP6Address{}, err + return &sr.Srv6SidList{ + NumSids: 0, + Weight: 0, + Sids: []sr.Srv6Sid{}, + }, err } // add sid to segment list - ipv6Segment := sr.SrIP6Address{ - Data: make([]byte, 16), // sr.SrIP6Address.Data = [16]byte + ipv6Segment := sr.Srv6Sid{ + Addr: make([]byte, 16), // sr.Srv6Sid.Addr = [16]byte } - copy(ipv6Segment.Data, parserSid) + copy(ipv6Segment.Addr, parserSid) segments = append(segments, ipv6Segment) } - return uint8(len(nextSidList)), segments, nil + return &sr.Srv6SidList{ + NumSids: uint8(len(segments)), + Sids: segments, + Weight: policySegment.Weight, + }, nil } // AddSteering sets in VPP steering into SRv6 policy. 
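The `struc:"sizeof=Sids"` tag introduced by fixapi-sr.patch is what makes the variable-length SID list above encodable: GoVPP's struc-based codec derives NumSids from len(Sids) when marshalling the message and uses it to size the slice when unmarshalling, so callers only have to fill the slice. Below is a minimal sketch of how a caller might assemble the regenerated types into an SrPolicyAdd request; the makeSidList helper and the concrete SIDs are illustrative only, not part of the plugin API.

package main

import (
	"fmt"
	"net"

	"github.com/ligato/vpp-agent/plugins/vpp/binapi/sr"
)

// makeSidList builds an sr.Srv6SidList from textual SIDs. Setting NumSids
// explicitly mirrors what convertPolicySegment does above; with the
// `struc:"sizeof=Sids"` tag the encoder keeps it consistent with len(Sids).
func makeSidList(weight uint32, sids ...string) (*sr.Srv6SidList, error) {
	list := &sr.Srv6SidList{Weight: weight}
	for _, s := range sids {
		ip := net.ParseIP(s) // like parseIPv6, accepts IPv4 too, in IPv6 form
		if ip == nil {
			return nil, fmt.Errorf("%q is not a valid SID (IP address)", s)
		}
		sid := sr.Srv6Sid{Addr: make([]byte, 16)} // sr.Srv6Sid.Addr = [16]byte
		copy(sid.Addr, ip.To16())
		list.Sids = append(list.Sids, sid)
	}
	list.NumSids = uint8(len(list.Sids))
	return list, nil
}

func main() {
	// Hypothetical segment list; any IPv6 addresses would do.
	sids, err := makeSidList(1, "A::", "B::", "C::")
	if err != nil {
		panic(err)
	}
	req := &sr.SrPolicyAdd{
		BsidAddr: net.ParseIP("A::").To16(),
		FibTable: 10,
		IsEncap:  1, // SRH encapsulation
		Sids:     *sids,
	}
	fmt.Printf("sr_policy_add: %+v\n", req)
}

Note that the per-list weight now travels inside Srv6SidList, which is why AddPolicy and modPolicy above no longer populate the leftover Weight field of the request itself.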
diff --git a/vpp.env b/vpp.env index f6e9cf2178..e1a110a270 100644 --- a/vpp.env +++ b/vpp.env @@ -1,2 +1,2 @@ VPP_REPO_URL=https://gerrit.fd.io/r/vpp.git -VPP_COMMIT=a5ee900fb75201bbfceaf13c8bc57a13ed094988 +VPP_COMMIT=3337bd22002e9b78459082c34f7b78370b177eb0 From 65de7c93dda97633e255089a66b3bd8e08810d4b Mon Sep 17 00:00:00 2001 From: Filip Gschwandtner Date: Fri, 13 Jul 2018 10:23:59 +0200 Subject: [PATCH 006/174] updated existing sr tests for sr api changes in vpp Signed-off-by: Filip Gschwandtner --- plugins/vpp/srplugin/vppcalls/srv6_test.go | 221 +++++++++++---------- 1 file changed, 117 insertions(+), 104 deletions(-) diff --git a/plugins/vpp/srplugin/vppcalls/srv6_test.go b/plugins/vpp/srplugin/vppcalls/srv6_test.go index 5b647f5f3d..52540e5908 100644 --- a/plugins/vpp/srplugin/vppcalls/srv6_test.go +++ b/plugins/vpp/srplugin/vppcalls/srv6_test.go @@ -39,9 +39,9 @@ const ( ) var ( - sidA = sid("A::") - sidB = sid("B::") - sidC = sid("C::") + sidA = *sid("A::") + sidB = *sid("B::") + sidC = *sid("C::") nextHop = net.ParseIP("B::").To16() nextHopIPv4 = net.ParseIP("1.2.3.4") ) @@ -52,6 +52,7 @@ func init() { swIfIndex.RegisterName(ifaceA, swIndexA, nil) } +// TODO add tests for new nhAddr4 field in end behaviours // TestAddLocalSID tests all cases for method AddLocalSID func TestAddLocalSID(t *testing.T) { // Prepare different cases @@ -71,11 +72,11 @@ func TestAddLocalSID(t *testing.T) { }, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorEnd, - FibTable: 10, - EndPsp: 1, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorEnd, + FibTable: 10, + EndPsp: 1, }, }, { @@ -89,13 +90,13 @@ func TestAddLocalSID(t *testing.T) { }, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorX, - FibTable: 10, - EndPsp: 1, - SwIfIndex: swIndexA, - NhAddr: nextHop, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorX, + FibTable: 10, + EndPsp: 1, + SwIfIndex: swIndexA, + NhAddr6: nextHop, }, }, { @@ -107,11 +108,11 @@ func TestAddLocalSID(t *testing.T) { }, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorT, - FibTable: 10, - EndPsp: 1, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorT, + FibTable: 10, + EndPsp: 1, }, }, { @@ -125,14 +126,14 @@ func TestAddLocalSID(t *testing.T) { }, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorDX2, - FibTable: 10, - EndPsp: 0, - VlanIndex: 1, - SwIfIndex: swIndexA, - NhAddr: nextHop, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorDX2, + FibTable: 10, + EndPsp: 0, + VlanIndex: 1, + SwIfIndex: swIndexA, + NhAddr6: nextHop, }, }, { @@ -145,13 +146,13 @@ func TestAddLocalSID(t *testing.T) { }, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorDX4, - FibTable: 10, - EndPsp: 0, - SwIfIndex: swIndexA, - NhAddr: nextHopIPv4, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorDX4, + FibTable: 10, + EndPsp: 0, + SwIfIndex: swIndexA, + NhAddr4: nextHopIPv4, }, }, { @@ -164,13 +165,13 @@ func TestAddLocalSID(t *testing.T) { }, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorDX6, - FibTable: 10, - EndPsp: 0, - SwIfIndex: swIndexA, - NhAddr: nextHop, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorDX6, + FibTable: 10, + EndPsp: 0, + SwIfIndex: swIndexA, + NhAddr6: nextHop, }, }, // endDT4 and endDT6 are not fully modelled yet -> 
testing only current implementation @@ -181,11 +182,11 @@ func TestAddLocalSID(t *testing.T) { EndFunction_DT4: &srv6.LocalSID_EndDT4{}, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorDT4, - FibTable: 10, - EndPsp: 0, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorDT4, + FibTable: 10, + EndPsp: 0, }, }, { @@ -195,11 +196,11 @@ func TestAddLocalSID(t *testing.T) { EndFunction_DT6: &srv6.LocalSID_EndDT6{}, }, Expected: &sr.SrLocalsidAddDel{ - IsDel: 0, - LocalsidAddr: sidA, - Behavior: vppcalls.BehaviorDT6, - FibTable: 10, - EndPsp: 0, + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorDT6, + FibTable: 10, + EndPsp: 0, }, }, { @@ -337,7 +338,7 @@ func TestAddLocalSID(t *testing.T) { ctx.MockVpp.MockReply(&sr.SrLocalsidAddDelReply{}) } // make the call - err := vppCalls.AddLocalSid(sidA, td.Input, swIfIndex, ctx.MockChannel) + err := vppCalls.AddLocalSid(sidA.Addr, td.Input, swIfIndex, ctx.MockChannel) // verify result if td.ExpectFailure { Expect(err).Should(HaveOccurred()) @@ -361,19 +362,19 @@ func TestDeleteLocalSID(t *testing.T) { }{ { Name: "simple delete of local sid", - Sid: sidA, + Sid: sidA.Addr, MockReply: &sr.SrLocalsidAddDelReply{}, Verify: func(err error, catchedMsg govppapi.Message) { Expect(err).ShouldNot(HaveOccurred()) Expect(catchedMsg).To(Equal(&sr.SrLocalsidAddDel{ - IsDel: 1, - LocalsidAddr: sidA, + IsDel: 1, + Localsid: sidA, })) }, }, { Name: "failure propagation from VPP", - Sid: sidA, + Sid: sidA.Addr, MockReply: &sr.SrLocalsidAddDelReply{Retval: 1}, Verify: func(err error, msg govppapi.Message) { Expect(err).Should(HaveOccurred()) @@ -468,30 +469,32 @@ func TestAddPolicy(t *testing.T) { }{ { Name: "simple SetAddPolicy", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(10, false, true), - PolicySegment: policySegment(1, sidA, sidB, sidC), + PolicySegment: policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr), MockReply: &sr.SrPolicyAddReply{}, Verify: func(err error, catchedMsg govppapi.Message) { Expect(err).ShouldNot(HaveOccurred()) Expect(catchedMsg).To(Equal(&sr.SrPolicyAdd{ - BsidAddr: sidA, - FibTable: 10, - Type: boolToUint(false), - IsEncap: boolToUint(true), - Weight: 1, - NSegments: 3, - Segments: []sr.SrIP6Address{{Data: sidA}, {Data: sidB}, {Data: sidC}}, + BsidAddr: sidA.Addr, + FibTable: 10, + Type: boolToUint(false), + IsEncap: boolToUint(true), + Sids: sr.Srv6SidList{ + Weight: 1, + NumSids: 3, + Sids: []sr.Srv6Sid{{Addr: sidA.Addr}, {Addr: sidB.Addr}, {Addr: sidC.Addr}}, + }, })) }, }, { Name: "invalid SID (not IP address) in segment list", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(10, false, true), PolicySegment: &srv6.PolicySegment{ Weight: 1, - Segments: []string{sidA.String(), invalidIPAddress, sidC.String()}, + Segments: []string{toString(sidA), invalidIPAddress, toString(sidC)}, }, MockReply: &sr.SrPolicyAddReply{}, Verify: func(err error, catchedMsg govppapi.Message) { @@ -500,9 +503,9 @@ func TestAddPolicy(t *testing.T) { }, { Name: "failure propagation from VPP", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(0, true, true), - PolicySegment: policySegment(1, sidA, sidB, sidC), + PolicySegment: policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr), MockReply: &sr.SrPolicyAddReply{Retval: 1}, Verify: func(err error, msg govppapi.Message) { Expect(err).Should(HaveOccurred()) @@ -534,7 +537,7 @@ func TestDeletePolicy(t *testing.T) { }{ { Name: "simple delete of policy", - BSID: sidA, + BSID: sidA.Addr, MockReply: &sr.SrPolicyDelReply{}, Verify: func(err error, catchedMsg 
govppapi.Message) { Expect(err).ShouldNot(HaveOccurred()) @@ -545,7 +548,7 @@ func TestDeletePolicy(t *testing.T) { }, { Name: "failure propagation from VPP", - BSID: sidA, + BSID: sidA.Addr, MockReply: &sr.SrPolicyDelReply{Retval: 1}, Verify: func(err error, msg govppapi.Message) { Expect(err).Should(HaveOccurred()) @@ -560,7 +563,7 @@ func TestDeletePolicy(t *testing.T) { defer teardown(ctx) // data and prepare case policy := policy(0, true, true) - segment := policySegment(1, sidA, sidB, sidC) + segment := policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr) vppCalls.AddPolicy(td.BSID, policy, segment, ctx.MockChannel) ctx.MockVpp.MockReply(td.MockReply) // make the call and verify @@ -583,29 +586,31 @@ func TestAddPolicySegment(t *testing.T) { }{ { Name: "simple addition of policy segment", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(10, false, true), - PolicySegment: policySegment(1, sidA, sidB, sidC), + PolicySegment: policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr), MockReply: &sr.SrPolicyModReply{}, Verify: func(err error, catchedMsg govppapi.Message) { Expect(err).ShouldNot(HaveOccurred()) Expect(catchedMsg).To(Equal(&sr.SrPolicyMod{ - BsidAddr: sidA, + BsidAddr: sidA.Addr, Operation: vppcalls.AddSRList, FibTable: 10, - Weight: 1, - NSegments: 3, - Segments: []sr.SrIP6Address{{Data: sidA}, {Data: sidB}, {Data: sidC}}, + Sids: sr.Srv6SidList{ + Weight: 1, + NumSids: 3, + Sids: []sr.Srv6Sid{{Addr: sidA.Addr}, {Addr: sidB.Addr}, {Addr: sidC.Addr}}, + }, })) }, }, { Name: "invalid SID (not IP address) in segment list", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(10, false, true), PolicySegment: &srv6.PolicySegment{ Weight: 1, - Segments: []string{sidA.String(), invalidIPAddress, sidC.String()}, + Segments: []string{toString(sidA), invalidIPAddress, toString(sidC)}, }, MockReply: &sr.SrPolicyModReply{}, Verify: func(err error, catchedMsg govppapi.Message) { @@ -614,9 +619,9 @@ func TestAddPolicySegment(t *testing.T) { }, { Name: "failure propagation from VPP", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(0, true, true), - PolicySegment: policySegment(1, sidA, sidB, sidC), + PolicySegment: policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr), MockReply: &sr.SrPolicyModReply{Retval: 1}, Verify: func(err error, msg govppapi.Message) { Expect(err).Should(HaveOccurred()) @@ -651,31 +656,33 @@ func TestDeletePolicySegment(t *testing.T) { }{ { Name: "simple deletion of policy segment", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(10, false, true), - PolicySegment: policySegment(1, sidA, sidB, sidC), + PolicySegment: policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr), SegmentIndex: 111, MockReply: &sr.SrPolicyModReply{}, Verify: func(err error, catchedMsg govppapi.Message) { Expect(err).ShouldNot(HaveOccurred()) Expect(catchedMsg).To(Equal(&sr.SrPolicyMod{ - BsidAddr: sidA, + BsidAddr: sidA.Addr, Operation: vppcalls.DeleteSRList, SlIndex: 111, FibTable: 10, - Weight: 1, - NSegments: 3, - Segments: []sr.SrIP6Address{{Data: sidA}, {Data: sidB}, {Data: sidC}}, + Sids: sr.Srv6SidList{ + Weight: 1, + NumSids: 3, + Sids: []sr.Srv6Sid{{Addr: sidA.Addr}, {Addr: sidB.Addr}, {Addr: sidC.Addr}}, + }, })) }, }, { Name: "invalid SID (not IP address) in segment list", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(10, false, true), PolicySegment: &srv6.PolicySegment{ Weight: 1, - Segments: []string{sidA.String(), invalidIPAddress, sidC.String()}, + Segments: []string{toString(sidA), invalidIPAddress, toString(sidC)}, }, SegmentIndex: 111, MockReply: &sr.SrPolicyModReply{}, @@ -685,9 +692,9 @@ func 
TestDeletePolicySegment(t *testing.T) { }, { Name: "failure propagation from VPP", - BSID: sidA, + BSID: sidA.Addr, Policy: policy(0, true, true), - PolicySegment: policySegment(1, sidA, sidB, sidC), + PolicySegment: policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr), SegmentIndex: 111, MockReply: &sr.SrPolicyModReply{Retval: 1}, Verify: func(err error, msg govppapi.Message) { @@ -734,7 +741,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { { Name: action + " of IPv6 L3 steering", Steering: &srv6.Steering{ - PolicyBsid: sidA.String(), + PolicyBsid: toString(sidA), L3Traffic: &srv6.Steering_L3Traffic{ FibTableId: 10, PrefixAddress: "1::/64", @@ -745,7 +752,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { Expect(err).ShouldNot(HaveOccurred()) Expect(catchedMsg).To(Equal(&sr.SrSteeringAddDel{ IsDel: boolToUint(removal), - BsidAddr: sidA, + BsidAddr: sidA.Addr, TableID: 10, TrafficType: vppcalls.SteerTypeIPv6, PrefixAddr: net.ParseIP("1::").To16(), @@ -756,7 +763,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { { Name: action + " of IPv4 L3 steering", Steering: &srv6.Steering{ - PolicyBsid: sidA.String(), + PolicyBsid: toString(sidA), L3Traffic: &srv6.Steering_L3Traffic{ FibTableId: 10, PrefixAddress: "1.2.3.4/24", @@ -767,7 +774,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { Expect(err).ShouldNot(HaveOccurred()) Expect(catchedMsg).To(Equal(&sr.SrSteeringAddDel{ IsDel: boolToUint(removal), - BsidAddr: sidA, + BsidAddr: sidA.Addr, TableID: 10, TrafficType: vppcalls.SteerTypeIPv4, PrefixAddr: net.ParseIP("1.2.3.4").To16(), @@ -778,7 +785,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { { Name: action + " of L2 steering", Steering: &srv6.Steering{ - PolicyBsid: sidA.String(), + PolicyBsid: toString(sidA), L2Traffic: &srv6.Steering_L2Traffic{ InterfaceName: ifaceA, }, @@ -788,7 +795,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { Expect(err).ShouldNot(HaveOccurred()) Expect(catchedMsg).To(Equal(&sr.SrSteeringAddDel{ IsDel: boolToUint(removal), - BsidAddr: sidA, + BsidAddr: sidA.Addr, TrafficType: vppcalls.SteerTypeL2, SwIfIndex: swIndexA, })) @@ -797,7 +804,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { { Name: "invalid prefix (" + action + " of IPv4 L3 steering)", Steering: &srv6.Steering{ - PolicyBsid: sidA.String(), + PolicyBsid: toString(sidA), L3Traffic: &srv6.Steering_L3Traffic{ FibTableId: 10, PrefixAddress: invalidIPAddress, @@ -811,7 +818,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { { Name: "interface without index (" + action + " of L2 steering)", Steering: &srv6.Steering{ - PolicyBsid: sidA.String(), + PolicyBsid: toString(sidA), L2Traffic: &srv6.Steering_L2Traffic{ InterfaceName: ifaceBOutOfidxs, }, @@ -838,7 +845,7 @@ func testAddRemoveSteering(t *testing.T, removal bool) { { Name: "failure propagation from VPP", Steering: &srv6.Steering{ - PolicyBsid: sidA.String(), + PolicyBsid: toString(sidA), L3Traffic: &srv6.Steering_L3Traffic{ FibTableId: 10, PrefixAddress: "1::/64", @@ -879,12 +886,14 @@ func teardown(ctx *vppcallmock.TestCtx) { ctx.TeardownTestCtx() } -func sid(str string) srv6.SID { +func sid(str string) *sr.Srv6Sid { bsid, err := srplugin.ParseIPv6(str) if err != nil { panic(fmt.Sprintf("can't parse %q into SRv6 BSID (IPv6 address)", str)) } - return bsid + return &sr.Srv6Sid{ + Addr: bsid, + } } func policy(fibtableID uint32, sprayBehaviour bool, srhEncapsulation bool) *srv6.Policy { @@ -913,3 +922,7 @@ func boolToUint(input bool) uint8 { } return uint8(0) } + 
+func toString(sid sr.Srv6Sid) string { + return srv6.SID(sid.Addr).String() +} From 3b2e4b1515c22e0570f8e36457076eeb9cb3e9fc Mon Sep 17 00:00:00 2001 From: Filip Gschwandtner Date: Fri, 13 Jul 2018 12:44:47 +0200 Subject: [PATCH 007/174] added unit tests for added API functionality for SR Signed-off-by: Filip Gschwandtner --- plugins/vpp/srplugin/vppcalls/srv6.go | 5 ++- plugins/vpp/srplugin/vppcalls/srv6_test.go | 47 ++++++++++++++++++++-- 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/plugins/vpp/srplugin/vppcalls/srv6.go b/plugins/vpp/srplugin/vppcalls/srv6.go index 8f84cf90ff..6c1b7d03fc 100644 --- a/plugins/vpp/srplugin/vppcalls/srv6.go +++ b/plugins/vpp/srplugin/vppcalls/srv6.go @@ -217,10 +217,11 @@ func (calls *srv6Calls) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.I if err != nil { return err } - if nhAddr.To4() == nil { + nhAddr4 := nhAddr.To4() + if nhAddr4 == nil { return fmt.Errorf("next hop of DX4 end function (%v) is not valid IPv4 address", localSID.EndFunction_DX4.NextHop) } - req.NhAddr4 = []byte(nhAddr) + req.NhAddr4 = []byte(nhAddr4) } else if localSID.EndFunction_DX6 != nil { req.Behavior = BehaviorDX6 interfaceSwIndex, _, exists := swIfIndex.LookupIdx(localSID.EndFunction_DX6.OutgoingInterface) diff --git a/plugins/vpp/srplugin/vppcalls/srv6_test.go b/plugins/vpp/srplugin/vppcalls/srv6_test.go index 52540e5908..da2d0ddcc5 100644 --- a/plugins/vpp/srplugin/vppcalls/srv6_test.go +++ b/plugins/vpp/srplugin/vppcalls/srv6_test.go @@ -43,7 +43,7 @@ var ( sidB = *sid("B::") sidC = *sid("C::") nextHop = net.ParseIP("B::").To16() - nextHopIPv4 = net.ParseIP("1.2.3.4") + nextHopIPv4 = net.ParseIP("1.2.3.4").To4() ) var swIfIndex = ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "sw_if_indexes", ifaceidx.IndexMetadata)) @@ -80,7 +80,7 @@ func TestAddLocalSID(t *testing.T) { }, }, { - Name: "addition with endX behaviour", + Name: "addition with endX behaviour (ipv6 next hop address)", Input: &srv6.LocalSID{ FibTableId: 10, EndFunction_X: &srv6.LocalSID_EndX{ @@ -99,6 +99,26 @@ func TestAddLocalSID(t *testing.T) { NhAddr6: nextHop, }, }, + { + Name: "addition with endX behaviour (ipv4 next hop address)", + Input: &srv6.LocalSID{ + FibTableId: 10, + EndFunction_X: &srv6.LocalSID_EndX{ + Psp: true, + NextHop: nextHopIPv4.String(), + OutgoingInterface: ifaceA, + }, + }, + Expected: &sr.SrLocalsidAddDel{ + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorX, + FibTable: 10, + EndPsp: 1, + SwIfIndex: swIndexA, + NhAddr4: nextHopIPv4, + }, + }, { Name: "addition with endT behaviour", Input: &srv6.LocalSID{ @@ -116,7 +136,7 @@ func TestAddLocalSID(t *testing.T) { }, }, { - Name: "addition with endDX2 behaviour", + Name: "addition with endDX2 behaviour (ipv6 next hop address)", Input: &srv6.LocalSID{ FibTableId: 10, EndFunction_DX2: &srv6.LocalSID_EndDX2{ @@ -136,6 +156,27 @@ func TestAddLocalSID(t *testing.T) { NhAddr6: nextHop, }, }, + { + Name: "addition with endDX2 behaviour (ipv4 next hop address)", + Input: &srv6.LocalSID{ + FibTableId: 10, + EndFunction_DX2: &srv6.LocalSID_EndDX2{ + VlanTag: 1, + NextHop: nextHopIPv4.String(), + OutgoingInterface: ifaceA, + }, + }, + Expected: &sr.SrLocalsidAddDel{ + IsDel: 0, + Localsid: sidA, + Behavior: vppcalls.BehaviorDX2, + FibTable: 10, + EndPsp: 0, + VlanIndex: 1, + SwIfIndex: swIndexA, + NhAddr4: nextHopIPv4, + }, + }, { Name: "addition with endDX4 behaviour", Input: &srv6.LocalSID{ From 1f9e3604393046c2adf96094678e05de00181966 Mon Sep 17 00:00:00 2001 From: Filip Gschwandtner Date: 
Mon, 16 Jul 2018 14:13:50 +0200 Subject: [PATCH 008/174] fixed return value and initialization refactoring in SR Signed-off-by: Filip Gschwandtner --- plugins/vpp/srplugin/vppcalls/srv6.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/plugins/vpp/srplugin/vppcalls/srv6.go b/plugins/vpp/srplugin/vppcalls/srv6.go index 6c1b7d03fc..ba4d5d955d 100644 --- a/plugins/vpp/srplugin/vppcalls/srv6.go +++ b/plugins/vpp/srplugin/vppcalls/srv6.go @@ -388,16 +388,12 @@ func (calls *srv6Calls) modPolicy(operation uint8, bindingSid net.IP, policy *sr } func (calls *srv6Calls) convertPolicySegment(policySegment *srv6.PolicySegment) (*sr.Srv6SidList, error) { - segments := make([]sr.Srv6Sid, 0) + var segments []sr.Srv6Sid for _, sid := range policySegment.Segments { // parse to IPv6 address parserSid, err := parseIPv6(sid) if err != nil { - return &sr.Srv6SidList{ - NumSids: 0, - Weight: 0, - Sids: []sr.Srv6Sid{}, - }, err + return nil, err } // add sid to segment list @@ -447,7 +443,7 @@ func (calls *srv6Calls) addDelSteering(delete bool, steering *srv6.Steering, swI } // converting policy reference - bsidAddr := make([]byte, 0) + var bsidAddr []byte if len(strings.Trim(steering.PolicyBsid, " ")) > 0 { bsid, err := parseIPv6(steering.PolicyBsid) if err != nil { From 3543e48b26a633b7dd01ab52a6de60a33116f21a Mon Sep 17 00:00:00 2001 From: Filip Gschwandtner Date: Mon, 16 Jul 2018 17:40:08 +0200 Subject: [PATCH 009/174] fixed application of multiple patches Signed-off-by: Filip Gschwandtner --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index d3d00c6d28..ddd0a48fae 100644 --- a/Makefile +++ b/Makefile @@ -143,8 +143,8 @@ generate-binapi: get-binapi-generators cd plugins/vpp/binapi/tapv2 && pkgreflect cd plugins/vpp/binapi/vpe && pkgreflect cd plugins/vpp/binapi/vxlan && pkgreflect - @echo "=> applying patches" - patch -p1 -i plugins/vpp/binapi/*.patch + @echo "=> applying fix patches" + find plugins/vpp/binapi -maxdepth 1 -type f -name '*.patch' -exec patch -p1 -i {} \; verify-binapi: @echo "=> verifying binary api" From 9f89560d6658ee546aaf999ebee7a4aae1f0f9f1 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Tue, 17 Jul 2018 12:29:44 +0200 Subject: [PATCH 010/174] use default values in example config files Signed-off-by: Vladimir Lavor --- plugins/govppmux/govpp.conf | 6 +++--- plugins/linux/linuxplugin.conf | 2 +- plugins/vpp/vpp-plugin.conf | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/govppmux/govpp.conf b/plugins/govppmux/govpp.conf index 8692ee3502..28ff5458b6 100644 --- a/plugins/govppmux/govpp.conf +++ b/plugins/govppmux/govpp.conf @@ -1,6 +1,6 @@ -# set custom shared memory prefix for VPP -shm-prefix: vpp +# Custom shared memory prefix for VPP. Not used by default. +shm-prefix: # If VPP lost connection, this flag allows to automatically run the whole resync procedure -# for all registered plugins after reconnection +# for all registered plugins after reconnection. resync-after-reconnect: false diff --git a/plugins/linux/linuxplugin.conf b/plugins/linux/linuxplugin.conf index ae4764e219..6e09eb0564 100644 --- a/plugins/linux/linuxplugin.conf +++ b/plugins/linux/linuxplugin.conf @@ -1,6 +1,6 @@ # Enable or disable feature to measure netlink API call duration. Measured time is shown directly in log (info level). # Measurement is taken also for certain procedures, like resync of plugin startup. Turned off by default. 
-stopwatch: true +stopwatch: false # Used to disable entire linux plugin functionality. Turned off by default. disabled: false \ No newline at end of file diff --git a/plugins/vpp/vpp-plugin.conf b/plugins/vpp/vpp-plugin.conf index d718e4dd0f..f10ad2e04c 100644 --- a/plugins/vpp/vpp-plugin.conf +++ b/plugins/vpp/vpp-plugin.conf @@ -3,11 +3,11 @@ # Default maximum transmission unit. The value is used if an interface without MTU is created (it means MTU in # interface configuration is preferred). -mtu: 1478 +mtu: 0 # Enable or disable feature to measure binary API call duration. Measured time is shown directly in log (info level). # Measurement is taken also for certain procedures, like resync of plugin startup. Turned off by default. -stopwatch: true +stopwatch: false # VPP plugin resync strategy. Available options are [full] and [optimize]. Full strategy is default, and always performs # the resync, optimize can be used for cold start; it looks for interface configuration looking for interfaces. If there @@ -15,5 +15,5 @@ stopwatch: true strategy: full # VPP agent allows to send status data back to ETCD. To allow it, add desired status publishers. Currently supported -# for [etcd] and [redis] (both options can be chosen) -status-publishers: [redis] \ No newline at end of file +# for [etcd] and [redis] (both options can be chosen together) +status-publishers: \ No newline at end of file From 1663466008d59cf6fb7aed51932b331049480807 Mon Sep 17 00:00:00 2001 From: Ondrej Fabry Date: Wed, 18 Jul 2018 08:52:12 +0200 Subject: [PATCH 011/174] Cut table name after zero byte Signed-off-by: Ondrej Fabry --- plugins/vpp/l3plugin/vppdump/dump_vppcalls.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go index 85689fa17a..4362d66728 100644 --- a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go +++ b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go @@ -15,6 +15,7 @@ package vppdump import ( + "bytes" "fmt" "net" @@ -108,7 +109,7 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, return nil, err } - rt.TableName = string(tableName) + rt.TableName = string(bytes.SplitN(tableName, []byte{0x00}, 2)[0]) rt.VrfID = tableID rt.DstAddr = *parsedIP From 975a798905cac751da7e65e13050573cd52da2ce Mon Sep 17 00:00:00 2001 From: Marcel Sestak Date: Wed, 18 Jul 2018 13:58:43 +0200 Subject: [PATCH 012/174] few tests changes Signed-off-by: Marcel Sestak --- tests/robot/suites/crudIPv6/linux_ip_route_crudIPv6.robot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/robot/suites/crudIPv6/linux_ip_route_crudIPv6.robot b/tests/robot/suites/crudIPv6/linux_ip_route_crudIPv6.robot index 4dd56507e6..19f68fd36a 100644 --- a/tests/robot/suites/crudIPv6/linux_ip_route_crudIPv6.robot +++ b/tests/robot/suites/crudIPv6/linux_ip_route_crudIPv6.robot @@ -17,7 +17,7 @@ Test Teardown TestTeardown *** Variables *** ${VARIABLES}= common ${ENV}= common -${CONFIG_SLEEP}= 10s +${CONFIG_SLEEP}= 50s ${RESYNC_SLEEP}= 15s # wait for resync vpps after restart ${RESYNC_WAIT}= 30s From 947066fea3bc4334c1785f63d92e67371c7149e2 Mon Sep 17 00:00:00 2001 From: Rastislav Szabo Date: Wed, 18 Jul 2018 15:40:55 +0200 Subject: [PATCH 013/174] Support for inter-VRF routing Signed-off-by: Rastislav Szabo --- plugins/vpp/data_change.go | 4 +- plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go | 2 +- plugins/vpp/l3plugin/data_resync.go | 36 +++- plugins/vpp/l3plugin/route_config.go | 4 +- 
plugins/vpp/l3plugin/route_utils.go | 33 +++- .../vpp/l3plugin/vppcalls/route_vppcalls.go | 32 +++- plugins/vpp/l3plugin/vppdump/dump_vppcalls.go | 9 +- plugins/vpp/model/l3/keys_agent_l3.go | 4 + plugins/vpp/model/l3/l3.pb.go | 98 ++++++----- plugins/vpp/model/l3/l3.proto | 10 +- plugins/vpp/model/rpc/rpc.pb.go | 155 +++++++++--------- 11 files changed, 238 insertions(+), 149 deletions(-) diff --git a/plugins/vpp/data_change.go b/plugins/vpp/data_change.go index 8cbbea1cf7..a972cceff5 100644 --- a/plugins/vpp/data_change.go +++ b/plugins/vpp/data_change.go @@ -156,9 +156,7 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call return false, err } } else { - // Vrf - // TODO vrf not implemented yet - plugin.Log.Warn("VRFs are not supported yet") + plugin.Log.Warnf("Key '%s' not supported", key) } } else if strings.HasPrefix(key, l3.ArpKeyPrefix()) { _, _, err := l3.ParseArpKey(key) diff --git a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go index e61194835d..70cab229f1 100644 --- a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go @@ -93,7 +93,7 @@ func CreateVrfIfNeeded(vrfID uint32, vppChan VPPChannel) error { return err } if _, ok := tables[vrfID]; !ok { - logrus.DefaultLogger().Warnf("VRF table %v does not exists, creating it", vrfID) + logrus.DefaultLogger().Infof("VRF table %v does not exists, creating it", vrfID) return vppAddIPTable(vrfID, vppChan) } diff --git a/plugins/vpp/l3plugin/data_resync.go b/plugins/vpp/l3plugin/data_resync.go index 2fbe33385c..b928adb951 100644 --- a/plugins/vpp/l3plugin/data_resync.go +++ b/plugins/vpp/l3plugin/data_resync.go @@ -15,8 +15,10 @@ package l3plugin import ( + "fmt" "github.com/ligato/cn-infra/logging/measure" l3ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" + "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppdump" "github.com/ligato/vpp-agent/plugins/vpp/model/l3" ) @@ -46,10 +48,15 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error nbRouteID := routeIdentifier(nbRoute.VrfId, nbRoute.DstIpAddr, nbRoute.NextHopAddr) nbIfIdx, _, found := plugin.ifIndexes.LookupIdx(nbRoute.OutgoingInterface) if !found { - plugin.log.Debugf("RESYNC routes: outgoing interface not found for %s", nbRouteID) - plugin.rtCachedIndexes.RegisterName(nbRouteID, plugin.rtIndexSeq, nbRoute) - plugin.rtIndexSeq++ - continue + if isVrfLookupRoute(nbRoute) { + // expected by VRF lookup route + nbIfIdx = vppcalls.NextHopOutgoingIfUnset + } else { + plugin.log.Debugf("RESYNC routes: outgoing interface not found for %s", nbRouteID) + plugin.rtCachedIndexes.RegisterName(nbRouteID, plugin.rtIndexSeq, nbRoute) + plugin.rtIndexSeq++ + continue + } } // Default VPP value for weight in case it is not set if nbRoute.Weight == 0 { @@ -85,8 +92,23 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error continue } if vppRoute.NextHopAddr.String() != nbRoute.NextHopAddr { - plugin.log.Debugf("RESYNC routes: next hop address is different (NB: %d, VPP %d)", - nbRoute.NextHopAddr, vppRoute.NextHopAddr.String()) + if nbRoute.NextHopAddr == "" && vppRoute.NextHopAddr.IsUnspecified() { + plugin.log.Debugf("RESYNC routes: empty next hop address matched (NB: %s, VPP %s)", + nbRoute.NextHopAddr, vppRoute.NextHopAddr.String()) + } else { + plugin.log.Debugf("RESYNC routes: next hop address is different (NB: %s, VPP %s)", + nbRoute.NextHopAddr, 
vppRoute.NextHopAddr.String()) + continue + } + } + if vppRoute.NextHopVrfId != nbRoute.NextHopVrfId { + plugin.log.Debugf("RESYNC routes: next hop VRF ID is different (NB: %d, VPP %d)", + nbRoute.NextHopVrfId, vppRoute.NextHopVrfId) + continue + } + if vppRoute.LookupVrfID != nbRoute.LookupVrfId { + plugin.log.Debugf("RESYNC routes: Lookup VRF ID is different (NB: %d, VPP %d)", + nbRoute.LookupVrfId, vppRoute.LookupVrfID) + continue } // Register existing routes @@ -106,7 +128,7 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error if !found { // create new route if does not exist yet. VRF ID is already validated at this point. plugin.log.Debugf("RESYNC routes: route %s not found and will be configured", routeID) - if err := plugin.ConfigureRoute(nbRoute, string(nbRoute.VrfId)); err != nil { + if err := plugin.ConfigureRoute(nbRoute, fmt.Sprintf("%d", nbRoute.VrfId)); err != nil { plugin.log.Error(err) wasError = err } diff --git a/plugins/vpp/l3plugin/route_config.go b/plugins/vpp/l3plugin/route_config.go index 958ecce908..3276cc8494 100644 --- a/plugins/vpp/l3plugin/route_config.go +++ b/plugins/vpp/l3plugin/route_config.go @@ -109,13 +109,15 @@ func (plugin *RouteConfigurator) clearMapping() { // Create unique identifier which serves as a name in name-to-index mapping. func routeIdentifier(vrf uint32, destination string, nextHop string) string { + if nextHop == "<nil>" { + nextHop = "" // normalize the "<nil>" that net.IP.String() yields for an empty next hop address + } return fmt.Sprintf("vrf%v-%v-%v", vrf, destination, nextHop) } // ConfigureRoute processes the NB config and propagates it to bin api calls. func (plugin *RouteConfigurator) ConfigureRoute(config *l3.StaticRoutes_Route, vrfFromKey string) error { plugin.log.Infof("Configuring new route %v -> %v", config.DstIpAddr, config.NextHopAddr) - // Validate VRF index from key and it's value in data.
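+ // validate the VRF index from the key against its value in the data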
if err := plugin.validateVrfFromKey(config, vrfFromKey); err != nil { return err diff --git a/plugins/vpp/l3plugin/route_utils.go b/plugins/vpp/l3plugin/route_utils.go index 65cde9168d..9f77fabd83 100644 --- a/plugins/vpp/l3plugin/route_utils.go +++ b/plugins/vpp/l3plugin/route_utils.go @@ -54,7 +54,9 @@ func eqRoutes(a *vppcalls.Route, b *vppcalls.Route) bool { bytes.Equal(a.DstAddr.IP, b.DstAddr.IP) && bytes.Equal(a.DstAddr.Mask, b.DstAddr.Mask) && bytes.Equal(a.NextHopAddr, b.NextHopAddr) && + a.NextHopVrfId == b.VrfID && a.OutIface == b.OutIface && + a.LookupVrfID == b.LookupVrfID && a.Weight == b.Weight && a.Preference == b.Preference } @@ -72,9 +74,15 @@ func lessRoute(a *vppcalls.Route, b *vppcalls.Route) bool { if !bytes.Equal(a.NextHopAddr, b.NextHopAddr) { return bytes.Compare(a.NextHopAddr, b.NextHopAddr) < 0 } + if a.NextHopVrfId != b.NextHopVrfId { + return a.NextHopVrfId < b.NextHopVrfId + } if a.OutIface != b.OutIface { return a.OutIface < b.OutIface } + if a.LookupVrfID != b.LookupVrfID { + return a.LookupVrfID < b.LookupVrfID + } if a.Preference != b.Preference { return a.Preference < b.Preference } @@ -89,8 +97,11 @@ func TransformRoute(routeInput *l3.StaticRoutes_Route, swIndex uint32, log loggi return nil, nil } if routeInput.DstIpAddr == "" { - log.Infof("Route does not contain destination address") - return nil, nil + if !isVrfLookupRoute(routeInput) { + // no destination address is only allowed for VRF lookup route + log.Infof("Route does not contain destination address") + return nil, nil + } } parsedDestIP, isIpv6, err := addrs.ParseIPWithPrefix(routeInput.DstIpAddr) if err != nil { @@ -105,12 +116,14 @@ func TransformRoute(routeInput *l3.StaticRoutes_Route, swIndex uint32, log loggi nextHopIP = nextHopIP.To4() } route := &vppcalls.Route{ - VrfID: vrfID, - DstAddr: *parsedDestIP, - NextHopAddr: nextHopIP, - OutIface: swIndex, - Weight: routeInput.Weight, - Preference: routeInput.Preference, + VrfID: vrfID, + DstAddr: *parsedDestIP, + NextHopAddr: nextHopIP, + NextHopVrfId: routeInput.NextHopVrfId, + OutIface: swIndex, + LookupVrfID: routeInput.LookupVrfId, + Weight: routeInput.Weight, + Preference: routeInput.Preference, } return route, nil } @@ -160,3 +173,7 @@ func (plugin *RouteConfigurator) diffRoutes(new []*vppcalls.Route, old []*vppcal } return } + +func isVrfLookupRoute(r *l3.StaticRoutes_Route) bool { + return r.OutgoingInterface == "" && r.VrfId != r.LookupVrfId +} diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go index 22e5fb7ea0..666dbcd0c5 100644 --- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go @@ -38,13 +38,15 @@ var RouteMessages = []govppapi.Message{ // Route represents a forward IP route entry with the parameters of gateway // to which packets should be forwarded when a given routing table entry is applied. 
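+// NextHopVrfId selects the VRF in which the next hop is resolved; for VRF
+// lookup routes (OutIface left unset and VrfID != LookupVrfID) the forwarding
+// lookup is done in LookupVrfID instead.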
type Route struct { - VrfID uint32 `json:"vrf_id"` - TableName string `json:"table_name"` - DstAddr net.IPNet `json:"dst_addr"` - NextHopAddr net.IP `json:"next_hop_addr"` - OutIface uint32 `json:"out_iface"` - Weight uint32 `json:"weight"` - Preference uint32 `json:"preference"` + VrfID uint32 `json:"vrf_id"` + TableName string `json:"table_name"` + DstAddr net.IPNet `json:"dst_addr"` + NextHopAddr net.IP `json:"next_hop_addr"` + NextHopVrfId uint32 `json:"next_hop_vrf_id"` + OutIface uint32 `json:"out_iface"` + LookupVrfID uint32 `json:"lookup_vrf_id"` + Weight uint32 `json:"weight"` + Preference uint32 `json:"preference"` } const ( @@ -95,13 +97,18 @@ func vppAddDelRoute(route *Route, vppChan VPPChannel, delete bool, stopwatch *me req.NextHopSwIfIndex = route.OutIface req.NextHopWeight = uint8(route.Weight) req.NextHopPreference = uint8(route.Preference) - req.NextHopTableID = route.VrfID req.NextHopViaLabel = NextHopViaLabelUnset req.ClassifyTableIndex = ClassifyTableIndexUnset req.IsDrop = 0 // VRF req.TableID = route.VrfID + if isVrfLookupRoute(route) { + // next hop not specified = VRF lookup + req.NextHopTableID = route.LookupVrfID + } else { + req.NextHopTableID = route.NextHopVrfId + } // Multi path is always true req.IsMultipath = 1 @@ -123,6 +130,11 @@ func VppAddRoute(route *Route, vppChan ifvppcalls.VPPChannel, stopwatch *measure if err := ifvppcalls.CreateVrfIfNeeded(route.VrfID, vppChan); err != nil { return err } + if isVrfLookupRoute(route) { + if err := ifvppcalls.CreateVrfIfNeeded(route.LookupVrfID, vppChan); err != nil { + return err + } + } return vppAddDelRoute(route, vppChan, false, stopwatch) } @@ -130,3 +142,7 @@ func VppAddRoute(route *Route, vppChan ifvppcalls.VPPChannel, stopwatch *measure func VppDelRoute(route *Route, vppChan VPPChannel, stopwatch *measure.Stopwatch) error { return vppAddDelRoute(route, vppChan, true, stopwatch) } + +func isVrfLookupRoute(r *Route) bool { + return r.OutIface == NextHopOutgoingIfUnset && r.VrfID != r.LookupVrfID +} diff --git a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go index b2d3bcd197..3ea1455f15 100644 --- a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go +++ b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go @@ -51,7 +51,6 @@ func DumpStaticRoutes(log logging.Logger, vppChan vppcalls.VPPChannel, timeLog m log.Error(err) return nil, err } - ipv4Route, err := dumpStaticRouteIPv4Details(fibDetails) if err != nil { return nil, err @@ -120,6 +119,14 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, } rt.NextHopAddr = nextHopAddr + + if nextHopAddr.IsUnspecified() && path[0].SwIfIndex == vppcalls.NextHopOutgoingIfUnset { + // next hop IP nor outgoing interface is specified = VRF lookup route + rt.LookupVrfID = path[0].TableID + } else { + rt.NextHopVrfId = path[0].TableID + } + rt.OutIface = path[0].SwIfIndex rt.Preference = uint32(path[0].Preference) rt.Weight = uint32(path[0].Weight) diff --git a/plugins/vpp/model/l3/keys_agent_l3.go b/plugins/vpp/model/l3/keys_agent_l3.go index 86f933a27b..9e480c4196 100644 --- a/plugins/vpp/model/l3/keys_agent_l3.go +++ b/plugins/vpp/model/l3/keys_agent_l3.go @@ -88,6 +88,10 @@ func ParseRouteKey(key string) (isRouteKey bool, vrfIndex string, dstNetAddr str if mask, err := strconv.Atoi(routeComps[3]); err == nil { return true, routeComps[0], routeComps[2], mask, routeComps[4] } + } else if len(routeComps) == 4 && routeComps[1] == "fib" { + if mask, err := strconv.Atoi(routeComps[3]); err == nil { + return true, 
routeComps[0], routeComps[2], mask, "" + } } } return false, "", "", 0, "" diff --git a/plugins/vpp/model/l3/l3.pb.go b/plugins/vpp/model/l3/l3.pb.go index c66e42da8e..840b0f3af1 100644 --- a/plugins/vpp/model/l3/l3.pb.go +++ b/plugins/vpp/model/l3/l3.pb.go @@ -53,9 +53,11 @@ type StaticRoutes_Route struct { Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` DstIpAddr string `protobuf:"bytes,3,opt,name=dst_ip_addr,json=dstIpAddr,proto3" json:"dst_ip_addr,omitempty"` NextHopAddr string `protobuf:"bytes,4,opt,name=next_hop_addr,json=nextHopAddr,proto3" json:"next_hop_addr,omitempty"` - OutgoingInterface string `protobuf:"bytes,5,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` - Weight uint32 `protobuf:"varint,6,opt,name=weight,proto3" json:"weight,omitempty"` - Preference uint32 `protobuf:"varint,7,opt,name=preference,proto3" json:"preference,omitempty"` + NextHopVrfId uint32 `protobuf:"varint,5,opt,name=next_hop_vrf_id,json=nextHopVrfId,proto3" json:"next_hop_vrf_id,omitempty"` + OutgoingInterface string `protobuf:"bytes,6,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` + LookupVrfId uint32 `protobuf:"varint,7,opt,name=lookup_vrf_id,json=lookupVrfId,proto3" json:"lookup_vrf_id,omitempty"` + Weight uint32 `protobuf:"varint,8,opt,name=weight,proto3" json:"weight,omitempty"` + Preference uint32 `protobuf:"varint,9,opt,name=preference,proto3" json:"preference,omitempty"` } func (m *StaticRoutes_Route) Reset() { *m = StaticRoutes_Route{} } @@ -91,6 +93,13 @@ func (m *StaticRoutes_Route) GetNextHopAddr() string { return "" } +func (m *StaticRoutes_Route) GetNextHopVrfId() uint32 { + if m != nil { + return m.NextHopVrfId + } + return 0 +} + func (m *StaticRoutes_Route) GetOutgoingInterface() string { if m != nil { return m.OutgoingInterface @@ -98,6 +107,13 @@ func (m *StaticRoutes_Route) GetOutgoingInterface() string { return "" } +func (m *StaticRoutes_Route) GetLookupVrfId() uint32 { + if m != nil { + return m.LookupVrfId + } + return 0 +} + func (m *StaticRoutes_Route) GetWeight() uint32 { if m != nil { return m.Weight @@ -360,41 +376,43 @@ func init() { func init() { proto.RegisterFile("l3.proto", fileDescriptorL3) } var fileDescriptorL3 = []byte{ - // 569 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xd5, 0xa6, 0x8d, 0x13, 0x8f, 0x9b, 0x22, 0x46, 0x34, 0x18, 0xab, 0x94, 0x60, 0x71, 0xc8, - 0x05, 0x1f, 0x88, 0xc4, 0x81, 0x8a, 0x43, 0x0e, 0x48, 0x44, 0x2a, 0x15, 0x72, 0x7b, 0xb7, 0x9c, - 0x78, 0x93, 0xac, 0x64, 0xec, 0xd5, 0xee, 0xb6, 0x34, 0x57, 0xfe, 0x01, 0x0e, 0x7c, 0x03, 0xff, - 0xc1, 0xc7, 0xc0, 0x27, 0x70, 0x40, 0xde, 0x5d, 0x3b, 0x4e, 0x24, 0x50, 0x4f, 0x7e, 0x33, 0xf3, - 0x76, 0xbc, 0xef, 0xe5, 0xc5, 0xd0, 0xcf, 0x27, 0x11, 0x17, 0xa5, 0x2a, 0xb1, 0x93, 0x4f, 0xc2, - 0x1f, 0x1d, 0x38, 0xba, 0x52, 0xa9, 0x62, 0x8b, 0xb8, 0xbc, 0x51, 0x54, 0x62, 0x04, 0x8e, 0xd0, - 0xc8, 0x27, 0xa3, 0x83, 0xb1, 0xf7, 0x6a, 0x18, 0xe5, 0x93, 0xa8, 0xcd, 0x88, 0xf4, 0x23, 0xb6, - 0xac, 0xe0, 0x17, 0x81, 0xae, 0xee, 0xe0, 0x09, 0x38, 0xb7, 0x62, 0x99, 0xb0, 0xcc, 0x27, 0x23, - 0x32, 0x1e, 0xc4, 0xdd, 0x5b, 0xb1, 0x9c, 0x65, 0x38, 0x02, 0x2f, 0xa3, 0x72, 0x21, 0x18, 0x57, - 0xac, 0x2c, 0xfc, 0xce, 0x88, 0x8c, 0xdd, 0xb8, 0xdd, 0xc2, 0x33, 0xf0, 0x32, 0xa9, 0x12, 0xc6, - 0x93, 0x34, 0xcb, 0x84, 0x7f, 0xa0, 0x19, 0x6e, 0x26, 0xd5, 0x8c, 0x4f, 0xb3, 0x4c, 0x60, 0x08, - 
0x83, 0x82, 0xde, 0xa9, 0x64, 0x5d, 0x5a, 0xc6, 0xa1, 0xd9, 0x51, 0x35, 0xdf, 0x97, 0x86, 0xf3, - 0x12, 0xb0, 0xbc, 0x51, 0xab, 0x92, 0x15, 0xab, 0x84, 0x15, 0x8a, 0x8a, 0x65, 0xba, 0xa0, 0x7e, - 0x57, 0x13, 0x1f, 0xd6, 0x93, 0x59, 0x3d, 0xc0, 0x21, 0x38, 0x9f, 0x29, 0x5b, 0xad, 0x95, 0xef, - 0xe8, 0xbb, 0xda, 0x0a, 0xcf, 0x00, 0xb8, 0xa0, 0x4b, 0x2a, 0x68, 0xb1, 0xa0, 0x7e, 0x4f, 0xcf, - 0x5a, 0x9d, 0xf0, 0x27, 0x81, 0xfe, 0x54, 0xf0, 0xeb, 0x74, 0x9e, 0x53, 0x7c, 0x0d, 0x5e, 0x2a, - 0x78, 0x42, 0x0b, 0x25, 0x58, 0xe3, 0xd7, 0x49, 0xe5, 0x57, 0x4d, 0xa9, 0xc0, 0xbb, 0x42, 0x89, - 0x4d, 0x0c, 0xa9, 0x41, 0x8c, 0xca, 0xe0, 0x8b, 0x59, 0xa2, 0x07, 0x78, 0x0a, 0xee, 0xf6, 0xbe, - 0xc4, 0x48, 0x6f, 0x1a, 0xf8, 0x14, 0xc0, 0xda, 0x42, 0xa5, 0xb4, 0xde, 0xb9, 0x4c, 0x4b, 0xa6, - 0x52, 0xe2, 0x73, 0x38, 0xe2, 0xeb, 0x8d, 0x6c, 0x08, 0xc6, 0x3a, 0xaf, 0xea, 0xd5, 0x94, 0x21, - 0x38, 0x52, 0xff, 0x7a, 0xda, 0xb5, 0x7e, 0x6c, 0xab, 0xf0, 0x37, 0x81, 0xe3, 0x8f, 0xa2, 0xbc, - 0xdb, 0x4c, 0x05, 0x8f, 0xd3, 0x62, 0x45, 0x25, 0xbe, 0x05, 0x4f, 0x54, 0x28, 0xc9, 0x99, 0x54, - 0xb5, 0x9e, 0xd3, 0x4a, 0xcf, 0x2e, 0x31, 0xd2, 0x8f, 0x0b, 0x26, 0x55, 0x0c, 0xa2, 0x86, 0x32, - 0xf8, 0x4e, 0xc0, 0x6d, 0x26, 0xf8, 0x08, 0xba, 0x79, 0x3a, 0xa7, 0xb9, 0xd5, 0x64, 0x0a, 0x7c, - 0x03, 0x8e, 0x3e, 0x51, 0x69, 0xa9, 0xb6, 0x87, 0xff, 0xdb, 0x6e, 0x50, 0x6c, 0x4f, 0x04, 0xe7, - 0xd0, 0xd5, 0x0d, 0x7c, 0x02, 0xfd, 0x25, 0x13, 0x3a, 0x31, 0x76, 0x7b, 0x4f, 0xd7, 0x33, 0x8e, - 0x8f, 0xa1, 0x97, 0xa7, 0x66, 0x62, 0xcc, 0x72, 0xaa, 0x72, 0xc6, 0xc3, 0x3f, 0x04, 0xb0, 0x7e, - 0x4f, 0x13, 0x03, 0x89, 0x1f, 0xe0, 0x41, 0x63, 0xf6, 0x8e, 0xec, 0x17, 0xed, 0x8b, 0x6d, 0x0f, - 0x44, 0x0d, 0xd4, 0xf2, 0x8f, 0x59, 0xbb, 0x94, 0xc1, 0x37, 0x02, 0x83, 0x1d, 0xc6, 0x3f, 0x6c, - 0xb8, 0x04, 0x68, 0x4e, 0xd6, 0x56, 0x44, 0xf7, 0x79, 0xe3, 0xb6, 0x8a, 0x5b, 0x1b, 0x82, 0x67, - 0xe0, 0x6e, 0xb3, 0x8d, 0x70, 0x58, 0xa4, 0x9f, 0xea, 0x30, 0x69, 0x1c, 0x7e, 0x25, 0xd0, 0xbf, - 0xba, 0xbe, 0x34, 0xb9, 0x3d, 0x07, 0x4f, 0xaa, 0x62, 0x2f, 0xb7, 0x81, 0xfe, 0x9f, 0x5b, 0x4a, - 0x03, 0x6c, 0x78, 0xa5, 0x2a, 0xea, 0xf0, 0x5e, 0xc0, 0x60, 0x67, 0xb8, 0x17, 0x51, 0xb2, 0x1f, - 0xd1, 0x9d, 0x7c, 0x77, 0xf6, 0xf2, 0x3d, 0x77, 0xf4, 0x97, 0x68, 0xf2, 0x37, 0x00, 0x00, 0xff, - 0xff, 0x3d, 0x02, 0x34, 0xb3, 0x95, 0x04, 0x00, 0x00, + // 601 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xd5, 0xa4, 0x8d, 0x6b, 0x5f, 0x27, 0xad, 0xbe, 0xd1, 0xd7, 0x60, 0xac, 0x52, 0x82, 0x05, + 0x52, 0x36, 0x78, 0x41, 0x24, 0x16, 0x54, 0x2c, 0xb2, 0x40, 0x22, 0x52, 0xa9, 0x90, 0x5b, 0xb1, + 0xb5, 0x9c, 0x78, 0x92, 0x8c, 0x30, 0xb6, 0x35, 0x33, 0x29, 0xcd, 0x96, 0x77, 0x80, 0x05, 0x0f, + 0xc2, 0x2b, 0xf0, 0x32, 0xec, 0xd9, 0xb0, 0x40, 0xbe, 0x33, 0x76, 0x9c, 0x48, 0x20, 0x56, 0xbe, + 0x3f, 0x67, 0xce, 0xdc, 0x7b, 0xe6, 0xc8, 0x60, 0x67, 0xe3, 0xb0, 0x14, 0x85, 0x2a, 0x68, 0x27, + 0x1b, 0x07, 0x3f, 0x3b, 0xd0, 0xbb, 0x56, 0x89, 0xe2, 0xf3, 0xa8, 0x58, 0x2b, 0x26, 0x69, 0x08, + 0x96, 0xc0, 0xc8, 0x23, 0xc3, 0x83, 0x91, 0xfb, 0x6c, 0x10, 0x66, 0xe3, 0xb0, 0x8d, 0x08, 0xf1, + 0x13, 0x19, 0x94, 0xff, 0xad, 0x03, 0x5d, 0xac, 0xd0, 0x53, 0xb0, 0x6e, 0xc5, 0x22, 0xe6, 0xa9, + 0x47, 0x86, 0x64, 0xd4, 0x8f, 0xba, 0xb7, 0x62, 0x31, 0x4d, 0xe9, 0x10, 0xdc, 0x94, 0xc9, 0xb9, + 0xe0, 0xa5, 0xe2, 0x45, 0xee, 0x75, 0x86, 0x64, 0xe4, 0x44, 0xed, 0x12, 0x3d, 0x07, 0x37, 0x95, + 0x2a, 0xe6, 0x65, 0x9c, 0xa4, 0xa9, 0xf0, 0x0e, 0x10, 0xe1, 0xa4, 0x52, 0x4d, 0xcb, 0x49, 0x9a, + 0x0a, 0x1a, 0x40, 
0x3f, 0x67, 0x77, 0x2a, 0x5e, 0x15, 0x06, 0x71, 0xa8, 0x39, 0xaa, 0xe2, 0xeb, + 0x42, 0x63, 0x9e, 0xc0, 0x49, 0x83, 0x31, 0x53, 0x74, 0x71, 0x8a, 0x9e, 0x41, 0xbd, 0xc3, 0x61, + 0x9e, 0x02, 0x2d, 0xd6, 0x6a, 0x59, 0xf0, 0x7c, 0x19, 0xf3, 0x5c, 0x31, 0xb1, 0x48, 0xe6, 0xcc, + 0xb3, 0x90, 0xef, 0xbf, 0xba, 0x33, 0xad, 0x1b, 0xd5, 0xcd, 0x59, 0x51, 0xbc, 0x5f, 0x37, 0x9c, + 0x47, 0xc8, 0xe9, 0xea, 0xa2, 0xa6, 0x1c, 0x80, 0xf5, 0x91, 0xf1, 0xe5, 0x4a, 0x79, 0x36, 0x36, + 0x4d, 0x46, 0xcf, 0x01, 0x4a, 0xc1, 0x16, 0x4c, 0xb0, 0x7c, 0xce, 0x3c, 0x07, 0x7b, 0xad, 0x4a, + 0xf0, 0x9d, 0x80, 0x3d, 0x11, 0xe5, 0x4d, 0x32, 0xcb, 0x18, 0x7d, 0x0e, 0x6e, 0x22, 0xca, 0x98, + 0xe5, 0x4a, 0xf0, 0x46, 0xfa, 0xd3, 0x4a, 0xfa, 0x1a, 0x52, 0x05, 0xaf, 0x72, 0x25, 0x36, 0x11, + 0x24, 0x3a, 0xe2, 0x4c, 0xfa, 0x9f, 0x34, 0x09, 0x36, 0xe8, 0x19, 0x38, 0xdb, 0x9d, 0x88, 0x56, + 0xb1, 0x29, 0xd0, 0x07, 0x00, 0x46, 0x61, 0x26, 0xa5, 0x79, 0x06, 0x87, 0xa3, 0x7a, 0x4c, 0x4a, + 0xfa, 0x08, 0x7a, 0xe5, 0x6a, 0x23, 0x1b, 0x80, 0x7e, 0x05, 0xb7, 0xaa, 0xd5, 0x90, 0x01, 0x58, + 0x12, 0x8d, 0x80, 0x0f, 0x60, 0x47, 0x26, 0x0b, 0x7e, 0x10, 0x38, 0x7e, 0x2b, 0x8a, 0xbb, 0xcd, + 0x44, 0x94, 0x51, 0x92, 0x2f, 0x99, 0xa4, 0x2f, 0xc1, 0x15, 0x55, 0x14, 0x67, 0x5c, 0xaa, 0x7a, + 0x9f, 0xb3, 0x6a, 0x9f, 0x5d, 0x60, 0x88, 0x9f, 0x4b, 0x2e, 0x55, 0x04, 0xa2, 0x0e, 0xa5, 0xff, + 0x95, 0x80, 0xd3, 0x74, 0xe8, 0xff, 0xd0, 0xcd, 0x92, 0x19, 0xcb, 0xcc, 0x4e, 0x3a, 0xa1, 0x2f, + 0xc0, 0xc2, 0x13, 0xd5, 0x2e, 0x15, 0x7b, 0xf0, 0x37, 0x76, 0x1d, 0x45, 0xe6, 0x84, 0x7f, 0x01, + 0x5d, 0x2c, 0xd0, 0xfb, 0x60, 0x2f, 0xb8, 0x40, 0xf3, 0x19, 0xf6, 0x23, 0xcc, 0xa7, 0x25, 0xbd, + 0x07, 0x47, 0x59, 0xa2, 0x3b, 0x5a, 0x2c, 0xab, 0x4a, 0xa7, 0x65, 0xf0, 0x8b, 0x00, 0xad, 0xef, + 0x69, 0xac, 0x22, 0xe9, 0x1b, 0x38, 0x69, 0xc4, 0xde, 0x59, 0xfb, 0x71, 0x7b, 0xb0, 0xed, 0x81, + 0xb0, 0x09, 0x71, 0xfd, 0x63, 0xde, 0x4e, 0xa5, 0xff, 0x85, 0x40, 0x7f, 0x07, 0xf1, 0x07, 0x19, + 0xae, 0x00, 0x9a, 0x93, 0xb5, 0x14, 0xe1, 0xbf, 0xdc, 0xb8, 0xcd, 0xa2, 0x16, 0x83, 0xff, 0x10, + 0x9c, 0xad, 0xff, 0x29, 0x1c, 0xe6, 0xc9, 0x87, 0xda, 0x4c, 0x18, 0x07, 0x9f, 0x09, 0xd8, 0xd7, + 0x37, 0x57, 0xda, 0xb7, 0x17, 0xe0, 0x4a, 0x95, 0xef, 0xf9, 0xd6, 0xc7, 0x5f, 0x86, 0x81, 0x34, + 0x81, 0x31, 0xaf, 0x54, 0x79, 0x6d, 0xde, 0x4b, 0xe8, 0xef, 0x34, 0xf7, 0x2c, 0x4a, 0xf6, 0x2d, + 0xba, 0xe3, 0xef, 0xce, 0x9e, 0xbf, 0x67, 0x16, 0xfe, 0xd4, 0xc6, 0xbf, 0x03, 0x00, 0x00, 0xff, + 0xff, 0x37, 0xf9, 0x27, 0x79, 0xe0, 0x04, 0x00, 0x00, } diff --git a/plugins/vpp/model/l3/l3.proto b/plugins/vpp/model/l3/l3.proto index 86aa2e7a50..f8e17a15f1 100644 --- a/plugins/vpp/model/l3/l3.proto +++ b/plugins/vpp/model/l3/l3.proto @@ -11,9 +11,13 @@ message StaticRoutes { string description = 2; /* optional description */ string dst_ip_addr = 3; /* ip address + prefix in format
<address>/<prefix> */ string next_hop_addr = 4; /* next hop address */ - string outgoing_interface = 5; /* outgoing interface name */ - uint32 weight = 6; /* weight (used for unequal cost load balncing) */ - uint32 preference = 7; /* The preference of the path. Lowest preference is preferred. */ + uint32 next_hop_vrf_id = 5; /* Next hop VRF ID. Should be equal to the vrf_id unless inter-VRF
routing is needed. */ + string outgoing_interface = 6; /* outgoing interface name */ + uint32 lookup_vrf_id = 7; /* Do a lookup in the specified VRF. In effect only if the next_hop_addr
nor the outgoing_interface is set. */ + uint32 weight = 8; /* weight (used for unequal cost load balancing) */ + uint32 preference = 9; /* The preference of the path. Lowest preference is preferred. */ /* Only paths with the best preference contribute to forwarding. */ /* (a poor man's primary and backup) */ } diff --git a/plugins/vpp/model/rpc/rpc.pb.go b/plugins/vpp/model/rpc/rpc.pb.go index a160b0bb28..b92f789be2 100644 --- a/plugins/vpp/model/rpc/rpc.pb.go +++ b/plugins/vpp/model/rpc/rpc.pb.go @@ -51,27 +51,27 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // items of every type. Universal type for every data change/resync request type DataRequest struct { // vppplugin - AccessLists []*acl.AccessLists_Acl `protobuf:"bytes,10,rep,name=AccessLists" json:"AccessLists,omitempty"` - Interfaces []*interfaces.Interfaces_Interface `protobuf:"bytes,20,rep,name=Interfaces" json:"Interfaces,omitempty"` - BfdSessions []*bfd.SingleHopBFD_Session `protobuf:"bytes,30,rep,name=BfdSessions" json:"BfdSessions,omitempty"` - BfdAuthKeys []*bfd.SingleHopBFD_Key `protobuf:"bytes,31,rep,name=BfdAuthKeys" json:"BfdAuthKeys,omitempty"` - BfdEchoFunction *bfd.SingleHopBFD_EchoFunction `protobuf:"bytes,32,opt,name=BfdEchoFunction" json:"BfdEchoFunction,omitempty"` - BridgeDomains []*l2.BridgeDomains_BridgeDomain `protobuf:"bytes,40,rep,name=BridgeDomains" json:"BridgeDomains,omitempty"` - FIBs []*l2.FibTable_FibEntry `protobuf:"bytes,41,rep,name=FIBs" json:"FIBs,omitempty"` - XCons []*l2.XConnectPairs_XConnectPair `protobuf:"bytes,42,rep,name=XCons" json:"XCons,omitempty"` - StaticRoutes []*l3.StaticRoutes_Route `protobuf:"bytes,50,rep,name=StaticRoutes" json:"StaticRoutes,omitempty"` - ArpEntries []*l3.ArpTable_ArpEntry `protobuf:"bytes,51,rep,name=ArpEntries" json:"ArpEntries,omitempty"` - ProxyArpInterfaces []*l3.ProxyArpInterfaces_InterfaceList `protobuf:"bytes,52,rep,name=ProxyArpInterfaces" json:"ProxyArpInterfaces,omitempty"` - ProxyArpRanges []*l3.ProxyArpRanges_RangeList `protobuf:"bytes,53,rep,name=ProxyArpRanges" json:"ProxyArpRanges,omitempty"` - L4Feature *l4.L4Features `protobuf:"bytes,60,opt,name=L4Feature" json:"L4Feature,omitempty"` - ApplicationNamespaces []*l4.AppNamespaces_AppNamespace `protobuf:"bytes,61,rep,name=ApplicationNamespaces" json:"ApplicationNamespaces,omitempty"` - StnRules []*stn.STN_Rule `protobuf:"bytes,70,rep,name=StnRules" json:"StnRules,omitempty"` - NatGlobal *nat.Nat44Global `protobuf:"bytes,71,opt,name=NatGlobal" json:"NatGlobal,omitempty"` - DNATs []*nat.Nat44DNat_DNatConfig `protobuf:"bytes,72,rep,name=DNATs" json:"DNATs,omitempty"` + AccessLists []*acl.AccessLists_Acl `protobuf:"bytes,10,rep,name=AccessLists,json=accessLists" json:"AccessLists,omitempty"` + Interfaces []*interfaces.Interfaces_Interface `protobuf:"bytes,20,rep,name=Interfaces,json=interfaces" json:"Interfaces,omitempty"` + BfdSessions []*bfd.SingleHopBFD_Session 
`protobuf:"bytes,30,rep,name=BfdSessions,json=bfdSessions" json:"BfdSessions,omitempty"` + BfdAuthKeys []*bfd.SingleHopBFD_Key `protobuf:"bytes,31,rep,name=BfdAuthKeys,json=bfdAuthKeys" json:"BfdAuthKeys,omitempty"` + BfdEchoFunction *bfd.SingleHopBFD_EchoFunction `protobuf:"bytes,32,opt,name=BfdEchoFunction,json=bfdEchoFunction" json:"BfdEchoFunction,omitempty"` + BridgeDomains []*l2.BridgeDomains_BridgeDomain `protobuf:"bytes,40,rep,name=BridgeDomains,json=bridgeDomains" json:"BridgeDomains,omitempty"` + FIBs []*l2.FibTable_FibEntry `protobuf:"bytes,41,rep,name=FIBs,json=fIBs" json:"FIBs,omitempty"` + XCons []*l2.XConnectPairs_XConnectPair `protobuf:"bytes,42,rep,name=XCons,json=xCons" json:"XCons,omitempty"` + StaticRoutes []*l3.StaticRoutes_Route `protobuf:"bytes,50,rep,name=StaticRoutes,json=staticRoutes" json:"StaticRoutes,omitempty"` + ArpEntries []*l3.ArpTable_ArpEntry `protobuf:"bytes,51,rep,name=ArpEntries,json=arpEntries" json:"ArpEntries,omitempty"` + ProxyArpInterfaces []*l3.ProxyArpInterfaces_InterfaceList `protobuf:"bytes,52,rep,name=ProxyArpInterfaces,json=proxyArpInterfaces" json:"ProxyArpInterfaces,omitempty"` + ProxyArpRanges []*l3.ProxyArpRanges_RangeList `protobuf:"bytes,53,rep,name=ProxyArpRanges,json=proxyArpRanges" json:"ProxyArpRanges,omitempty"` + L4Feature *l4.L4Features `protobuf:"bytes,60,opt,name=L4Feature,json=l4Feature" json:"L4Feature,omitempty"` + ApplicationNamespaces []*l4.AppNamespaces_AppNamespace `protobuf:"bytes,61,rep,name=ApplicationNamespaces,json=applicationNamespaces" json:"ApplicationNamespaces,omitempty"` + StnRules []*stn.STN_Rule `protobuf:"bytes,70,rep,name=StnRules,json=stnRules" json:"StnRules,omitempty"` + NatGlobal *nat.Nat44Global `protobuf:"bytes,71,opt,name=NatGlobal,json=natGlobal" json:"NatGlobal,omitempty"` + DNATs []*nat.Nat44DNat_DNatConfig `protobuf:"bytes,72,rep,name=DNATs,json=dNATs" json:"DNATs,omitempty"` // Linuxplugin - LinuxInterfaces []*interfaces1.LinuxInterfaces_Interface `protobuf:"bytes,80,rep,name=LinuxInterfaces" json:"LinuxInterfaces,omitempty"` - LinuxArpEntries []*l31.LinuxStaticArpEntries_ArpEntry `protobuf:"bytes,90,rep,name=LinuxArpEntries" json:"LinuxArpEntries,omitempty"` - LinuxRoutes []*l31.LinuxStaticRoutes_Route `protobuf:"bytes,91,rep,name=LinuxRoutes" json:"LinuxRoutes,omitempty"` + LinuxInterfaces []*interfaces1.LinuxInterfaces_Interface `protobuf:"bytes,80,rep,name=LinuxInterfaces,json=linuxInterfaces" json:"LinuxInterfaces,omitempty"` + LinuxArpEntries []*l31.LinuxStaticArpEntries_ArpEntry `protobuf:"bytes,90,rep,name=LinuxArpEntries,json=linuxArpEntries" json:"LinuxArpEntries,omitempty"` + LinuxRoutes []*l31.LinuxStaticRoutes_Route `protobuf:"bytes,91,rep,name=LinuxRoutes,json=linuxRoutes" json:"LinuxRoutes,omitempty"` } func (m *DataRequest) Reset() { *m = DataRequest{} } @@ -571,61 +571,62 @@ var _NotificationService_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 894 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xe1, 0x6f, 0xe3, 0x34, - 0x14, 0xd7, 0x28, 0x77, 0x30, 0x97, 0xed, 0x86, 0xef, 0x86, 0xcc, 0x40, 0xa3, 0x4c, 0x20, 0x36, - 0x04, 0x0e, 0x4a, 0x7b, 0x20, 0x01, 0x93, 0x68, 0x97, 0xeb, 0x56, 0x6d, 0x94, 0xca, 0xed, 0x87, - 0x13, 0xf7, 0xc9, 0x4d, 0x9d, 0xce, 0x92, 0x6b, 0x87, 0xd8, 0x39, 0xad, 0x7f, 0x33, 0xff, 0x04, - 0xb2, 0x93, 0xb4, 0x4e, 0x57, 0xa4, 0x49, 0xdb, 0x87, 0x2c, 0x7e, 0xef, 0xfd, 0x7e, 0xbf, 0x3c, - 0xfb, 0x3d, 0xbf, 
0x15, 0xec, 0x66, 0x69, 0x8c, 0xd3, 0x4c, 0x19, 0x05, 0x1b, 0x59, 0x1a, 0x1f, - 0xfd, 0x39, 0xe7, 0xe6, 0x36, 0x9f, 0xe2, 0x58, 0x2d, 0x02, 0xc1, 0xe7, 0xd4, 0xa8, 0xe0, 0x7d, - 0x9a, 0xfe, 0x48, 0xe7, 0x4c, 0x9a, 0x20, 0x15, 0xf9, 0x9c, 0x4b, 0x1d, 0xcc, 0x58, 0x42, 0x73, - 0x61, 0x2a, 0x33, 0x56, 0x8b, 0x85, 0x92, 0xc1, 0x42, 0xcd, 0x98, 0x08, 0x68, 0xec, 0x9e, 0x42, - 0xf3, 0xf1, 0x72, 0xd3, 0x64, 0x66, 0x9f, 0x52, 0xee, 0xdd, 0x63, 0xe5, 0xb8, 0x34, 0x2c, 0x4b, - 0x68, 0xcc, 0xb4, 0xb7, 0x2c, 0xc5, 0xaf, 0x1f, 0x2b, 0x2e, 0xc2, 0x40, 0x84, 0x4f, 0x26, 0xd6, - 0x0e, 0x44, 0xfb, 0xc9, 0xc4, 0x3a, 0x81, 0xe8, 0x3c, 0x55, 0x49, 0x24, 0x35, 0xf6, 0x79, 0x2a, - 0x39, 0x6d, 0xa4, 0x7d, 0x4a, 0xb9, 0xb7, 0x0f, 0x91, 0x13, 0x5c, 0xe6, 0x77, 0x85, 0xf1, 0xf0, - 0xf2, 0x5e, 0x3d, 0x4a, 0xd9, 0x2b, 0xc7, 0xc9, 0xbf, 0xbb, 0xa0, 0x19, 0x51, 0x43, 0x09, 0xfb, - 0x27, 0x67, 0xda, 0xc0, 0x9f, 0x41, 0xb3, 0x1b, 0xc7, 0x4c, 0xeb, 0x1b, 0xae, 0x8d, 0x46, 0xa0, - 0xd5, 0x38, 0x6d, 0x86, 0xaf, 0xb0, 0xbd, 0x05, 0x9e, 0x1f, 0x77, 0x63, 0x41, 0x7c, 0x20, 0xfc, - 0x03, 0x80, 0xc1, 0x2a, 0x4b, 0xf4, 0xca, 0xd1, 0x5a, 0xd8, 0x4b, 0x7c, 0xb0, 0x65, 0x49, 0x3c, - 0x0e, 0xfc, 0x0d, 0x34, 0x7b, 0xc9, 0x6c, 0xcc, 0xb4, 0xe6, 0x4a, 0x6a, 0x74, 0xec, 0x24, 0x3e, - 0xc7, 0xf6, 0xc2, 0x8c, 0xb9, 0x9c, 0x0b, 0x76, 0xa5, 0xd2, 0x5e, 0x3f, 0xc2, 0x25, 0x82, 0xf8, - 0x68, 0xf8, 0x8b, 0x23, 0x77, 0x73, 0x73, 0x7b, 0xcd, 0x96, 0x1a, 0x7d, 0xe5, 0xc8, 0x87, 0xf7, - 0xc9, 0xd7, 0x6c, 0x49, 0x7c, 0x24, 0xbc, 0x02, 0x2f, 0x7a, 0xc9, 0xec, 0x4d, 0x7c, 0xab, 0xfa, - 0xb9, 0x8c, 0x0d, 0x57, 0x12, 0xb5, 0x5a, 0x3b, 0xa7, 0xcd, 0xf0, 0xf8, 0x3e, 0xd9, 0x47, 0x91, - 0x4d, 0x1a, 0x8c, 0xc0, 0x5e, 0x2f, 0xe3, 0xb3, 0x39, 0x8b, 0xd4, 0x82, 0x72, 0xa9, 0xd1, 0xa9, - 0x4b, 0xe2, 0x18, 0x8b, 0x10, 0xd7, 0x02, 0x35, 0x8b, 0xd4, 0x49, 0xf0, 0x0c, 0x7c, 0xd8, 0x1f, - 0xf4, 0x34, 0x3a, 0x2b, 0x77, 0x20, 0x42, 0xdc, 0xe7, 0xd3, 0x09, 0x9d, 0x0a, 0x66, 0x17, 0x6f, - 0xa4, 0xc9, 0x96, 0xc4, 0x41, 0x60, 0x07, 0x3c, 0x7b, 0x7b, 0x61, 0x8f, 0xea, 0xfb, 0xf5, 0x87, - 0xac, 0x43, 0xb2, 0xd8, 0x8c, 0x28, 0xcf, 0x74, 0xcd, 0x22, 0x05, 0x18, 0xfe, 0x0a, 0x3e, 0x19, - 0x1b, 0x6a, 0x78, 0x4c, 0x54, 0x6e, 0x98, 0x46, 0xa1, 0x23, 0x7f, 0x86, 0x45, 0x1b, 0xfb, 0x7e, - 0xec, 0x5e, 0xa4, 0x86, 0x85, 0xaf, 0x01, 0xe8, 0x66, 0xa9, 0xcd, 0x81, 0x33, 0x8d, 0xda, 0x55, - 0x8a, 0x6d, 0xdc, 0xcd, 0xd2, 0x22, 0xc5, 0x32, 0xbc, 0x24, 0x1e, 0x10, 0x4e, 0x00, 0x1c, 0x65, - 0xea, 0x6e, 0xd9, 0xcd, 0x52, 0xaf, 0x47, 0x3a, 0x8e, 0xfe, 0x8d, 0xa5, 0xdf, 0x8f, 0xae, 0x7b, - 0xc4, 0xb6, 0x17, 0xd9, 0xc2, 0x87, 0x11, 0xd8, 0xaf, 0xbc, 0x84, 0xca, 0x39, 0xd3, 0xe8, 0xb5, - 0x53, 0xfc, 0xd2, 0x57, 0x2c, 0x22, 0xd8, 0xbd, 0x9c, 0xd2, 0x06, 0x07, 0xfe, 0x00, 0x76, 0x6f, - 0x3a, 0x7d, 0x46, 0x4d, 0x9e, 0x31, 0xf4, 0xbb, 0xab, 0xfc, 0x3e, 0x16, 0x1d, 0xbc, 0x72, 0x6a, - 0xb2, 0x06, 0xc0, 0x09, 0x38, 0xec, 0xa6, 0xa9, 0xe0, 0x31, 0xb5, 0x25, 0x1f, 0xd2, 0x05, 0xd3, - 0xa9, 0xdb, 0xcc, 0x79, 0x55, 0x82, 0x0e, 0xee, 0xa6, 0xe9, 0x3a, 0x50, 0xb3, 0xc8, 0x76, 0x32, - 0x3c, 0x03, 0x1f, 0x8f, 0x8d, 0x24, 0xb9, 0x60, 0x1a, 0xf5, 0x9d, 0xd0, 0x1e, 0xb6, 0x53, 0x64, - 0x3c, 0x19, 0x62, 0xeb, 0x25, 0xab, 0x30, 0xc4, 0x60, 0x77, 0x48, 0xcd, 0xa5, 0x50, 0x53, 0x2a, - 0xd0, 0xa5, 0x4b, 0xf7, 0x00, 0xdb, 0x01, 0x36, 0xa4, 0xa6, 0xd3, 0x29, 0xfc, 0x64, 0x0d, 0x81, - 0x01, 0x78, 0x16, 0x0d, 0xbb, 0x13, 0x8d, 0xae, 0xca, 0xeb, 0xb4, 0xc2, 0x46, 0x43, 0x6a, 0xb0, - 0xfd, 0x73, 0xa1, 0x64, 0xc2, 0xe7, 0xa4, 0xc0, 0xc1, 0xbf, 0xc0, 0x8b, 0x1b, 0x3b, 0x37, 0xbc, - 0x42, 0x8d, 0x1c, 0xf5, 0x5b, 0xff, 0x32, 
0x6f, 0x40, 0xbc, 0x1b, 0xbd, 0xc9, 0x86, 0x37, 0xa5, - 0xa0, 0xd7, 0x38, 0x7f, 0x3b, 0xc1, 0x13, 0x5b, 0x27, 0x17, 0x2a, 0x7a, 0x6c, 0x0d, 0x58, 0x77, - 0xd1, 0x26, 0x15, 0x9e, 0x83, 0xa6, 0x73, 0x95, 0xcd, 0xfb, 0xce, 0x29, 0x7d, 0xb1, 0xa1, 0x54, - 0xeb, 0x60, 0x1f, 0x7f, 0xf2, 0x1d, 0x78, 0x39, 0x54, 0x86, 0x27, 0x65, 0x0d, 0xaa, 0xa1, 0x77, - 0x00, 0x1a, 0x7c, 0x76, 0x87, 0x76, 0x5a, 0x3b, 0xa7, 0x7b, 0xc4, 0x2e, 0x4f, 0xf6, 0x40, 0x73, - 0x94, 0x1b, 0xc2, 0x74, 0xaa, 0xa4, 0x66, 0xd6, 0x8c, 0x98, 0x58, 0x99, 0x07, 0x60, 0x9f, 0x30, - 0xbd, 0x94, 0xf1, 0xca, 0x93, 0x80, 0x43, 0x5f, 0x58, 0x57, 0x01, 0x88, 0xc0, 0x47, 0x92, 0xdd, - 0x99, 0xc1, 0x4a, 0xbe, 0x32, 0x61, 0x1b, 0x34, 0xe4, 0x20, 0x41, 0x1f, 0xb8, 0x22, 0x7e, 0xbd, - 0x75, 0x54, 0xd6, 0x72, 0xb5, 0xe8, 0x90, 0x83, 0x4f, 0xed, 0xb4, 0xbe, 0xb8, 0xb5, 0xdd, 0x3b, - 0x66, 0xd9, 0x7b, 0x1e, 0x33, 0x78, 0x06, 0x1a, 0xa3, 0xdc, 0xc0, 0x03, 0x6c, 0x7f, 0xff, 0x78, - 0xc3, 0xfc, 0xa8, 0xf0, 0x78, 0x1b, 0xb1, 0xd0, 0x88, 0x89, 0xff, 0x85, 0x7a, 0x9b, 0x0c, 0xa3, - 0xe2, 0x53, 0xc5, 0x46, 0xab, 0x4f, 0x05, 0xe0, 0x79, 0xe1, 0xd8, 0x22, 0xf1, 0xd2, 0x79, 0xea, - 0x07, 0x13, 0x4e, 0xea, 0x27, 0x5e, 0xe9, 0x9c, 0x83, 0xc6, 0x25, 0x33, 0x10, 0x39, 0xca, 0x96, - 0x92, 0x1c, 0x1d, 0xdd, 0x8b, 0xac, 0xce, 0xf4, 0xa7, 0x9d, 0xe9, 0x73, 0xf7, 0xcf, 0xab, 0xfd, - 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x2d, 0xbf, 0xc6, 0xf2, 0x09, 0x00, 0x00, + // 911 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdf, 0x6f, 0xe3, 0x44, + 0x10, 0x56, 0xc9, 0xf5, 0xb8, 0x8c, 0xaf, 0x3f, 0xd8, 0xbb, 0x22, 0x53, 0x50, 0x09, 0x15, 0x88, + 0x16, 0xc1, 0x1a, 0x39, 0x39, 0x10, 0x94, 0x4a, 0x24, 0xf5, 0xa5, 0x8d, 0x5a, 0x85, 0x68, 0x93, + 0x07, 0x04, 0x4f, 0x1b, 0x67, 0x93, 0xae, 0xb4, 0x5d, 0x1b, 0xef, 0xfa, 0x94, 0xfc, 0xcd, 0xfc, + 0x13, 0x68, 0xd7, 0x76, 0x6c, 0x5f, 0x82, 0x74, 0xed, 0xc3, 0xd6, 0xbb, 0x33, 0xdf, 0xf7, 0x79, + 0x76, 0x66, 0x3c, 0x0d, 0x34, 0x93, 0x38, 0xc4, 0x71, 0x12, 0xe9, 0x08, 0x35, 0x92, 0x38, 0x3c, + 0xbe, 0x58, 0x70, 0x7d, 0x9f, 0x4e, 0x71, 0x18, 0x3d, 0x78, 0x82, 0x2f, 0xa8, 0x8e, 0xbc, 0x77, + 0x71, 0xfc, 0x03, 0x5d, 0x30, 0xa9, 0xbd, 0x58, 0xa4, 0x0b, 0x2e, 0x95, 0xb1, 0x78, 0x0f, 0xd1, + 0x8c, 0x09, 0x8f, 0x86, 0x76, 0x65, 0x0a, 0x8f, 0x25, 0x4f, 0xe7, 0x33, 0xb3, 0x72, 0xf2, 0xe0, + 0x71, 0x64, 0x2e, 0x35, 0x4b, 0xe6, 0x34, 0x64, 0xaa, 0xb2, 0xcd, 0xa5, 0x7e, 0x79, 0x9c, 0x94, + 0xf0, 0x3d, 0xe1, 0x3f, 0x91, 0xda, 0xf6, 0x44, 0xfb, 0x89, 0xd4, 0x8e, 0x27, 0x3a, 0x4f, 0x4b, + 0x9c, 0xa4, 0xda, 0xac, 0xa7, 0x91, 0x95, 0x96, 0x66, 0xe5, 0xe4, 0xdb, 0x0f, 0x21, 0x0b, 0x2e, + 0xd3, 0xe5, 0x07, 0xe4, 0xfd, 0xe2, 0xb1, 0x62, 0x95, 0xf4, 0x9d, 0xfe, 0xdb, 0x04, 0x27, 0xa0, + 0x9a, 0x12, 0xf6, 0x4f, 0xca, 0x94, 0x46, 0x3f, 0x81, 0xd3, 0x0d, 0x43, 0xa6, 0xd4, 0x1d, 0x57, + 0x5a, 0xb9, 0xd0, 0x6a, 0x9c, 0x39, 0xfe, 0x6b, 0x6c, 0xba, 0xad, 0x62, 0xc7, 0xdd, 0x50, 0x10, + 0x87, 0x96, 0x06, 0xf4, 0x3b, 0xc0, 0x60, 0x1d, 0x98, 0xfb, 0xda, 0xd2, 0x5a, 0xb8, 0x12, 0xeb, + 0x60, 0xcb, 0x96, 0x40, 0x09, 0x40, 0x17, 0xe0, 0xf4, 0xe6, 0xb3, 0x31, 0x53, 0x8a, 0x47, 0x52, + 0xb9, 0x27, 0x56, 0xe2, 0x33, 0x6c, 0x5a, 0x75, 0xcc, 0xe5, 0x42, 0xb0, 0x9b, 0x28, 0xee, 0xf5, + 0x03, 0x9c, 0x23, 0x88, 0x33, 0x2d, 0xd1, 0xe8, 0x67, 0x4b, 0xee, 0xa6, 0xfa, 0xfe, 0x96, 0xad, + 0x94, 0xfb, 0xa5, 0x25, 0x1f, 0x6d, 0x92, 0x6f, 0xd9, 0xca, 0x12, 0x0b, 0x24, 0xba, 0x81, 0x83, + 0xde, 0x7c, 0xf6, 0x36, 0xbc, 0x8f, 0xfa, 0xa9, 0x0c, 0x35, 0x8f, 0xa4, 0xdb, 0x6a, 0xed, 0x9c, + 0x39, 0xfe, 0xc9, 0x26, 0xb9, 
0x8a, 0x22, 0x07, 0xd3, 0x3a, 0x0d, 0x05, 0xb0, 0xd7, 0x4b, 0xf8, + 0x6c, 0xc1, 0x82, 0xe8, 0x81, 0x72, 0xa9, 0xdc, 0x33, 0x1b, 0xc4, 0x09, 0x16, 0x3e, 0xae, 0x39, + 0x6a, 0x27, 0xb2, 0x37, 0xad, 0xfa, 0xd0, 0x39, 0x3c, 0xeb, 0x0f, 0x7a, 0xca, 0x3d, 0xcf, 0x6f, + 0x20, 0x7c, 0xdc, 0xe7, 0xd3, 0x09, 0x9d, 0x0a, 0x66, 0x36, 0x6f, 0xa5, 0x4e, 0x56, 0xe4, 0xd9, + 0x7c, 0xd0, 0x53, 0xa8, 0x03, 0xbb, 0x7f, 0x5e, 0x99, 0x54, 0x7d, 0x57, 0xbe, 0xc8, 0x18, 0x24, + 0x0b, 0xf5, 0x88, 0xf2, 0x44, 0xd5, 0x4e, 0x64, 0x77, 0x69, 0xc0, 0xe8, 0x57, 0x78, 0x39, 0xd6, + 0x54, 0xf3, 0x90, 0x44, 0xa9, 0x66, 0xca, 0xf5, 0x2d, 0xf9, 0x53, 0x2c, 0xda, 0xb8, 0x6a, 0xc7, + 0xf6, 0x41, 0x5e, 0xaa, 0x8a, 0x0d, 0xbd, 0x01, 0xe8, 0x26, 0xb1, 0x89, 0x81, 0x33, 0xe5, 0xb6, + 0x8b, 0x10, 0xdb, 0xb8, 0x9b, 0xc4, 0x59, 0x88, 0xb9, 0x7b, 0x45, 0x80, 0xae, 0x81, 0x68, 0x02, + 0x68, 0x94, 0x44, 0xcb, 0x55, 0x37, 0x89, 0x2b, 0x3d, 0xd2, 0xb1, 0xf4, 0xaf, 0x0d, 0x7d, 0xd3, + 0x5b, 0xf6, 0x88, 0x69, 0x2f, 0x82, 0xe2, 0x0d, 0x04, 0x0a, 0x60, 0xbf, 0xe0, 0x11, 0x2a, 0x17, + 0x4c, 0xb9, 0x6f, 0xac, 0xe2, 0x17, 0x55, 0xc5, 0xcc, 0x83, 0xed, 0xc3, 0x2a, 0xed, 0xc7, 0x35, + 0x0f, 0xfa, 0x1e, 0x9a, 0x77, 0x9d, 0x3e, 0xa3, 0x3a, 0x4d, 0x98, 0xfb, 0x9b, 0xad, 0xfc, 0x3e, + 0x16, 0x1d, 0xbc, 0x36, 0x2a, 0xd2, 0x14, 0xc5, 0x1e, 0x4d, 0xe0, 0xa8, 0x1b, 0xc7, 0x82, 0x87, + 0xd4, 0x94, 0x7c, 0x48, 0x1f, 0x98, 0x8a, 0xed, 0x65, 0x2e, 0x8b, 0x12, 0x74, 0x70, 0x37, 0x8e, + 0x4b, 0x47, 0xed, 0x44, 0x8e, 0xe8, 0x36, 0x32, 0x3a, 0x87, 0x17, 0x63, 0x2d, 0x49, 0x2a, 0x98, + 0x72, 0xfb, 0x56, 0x68, 0x0f, 0x9b, 0x59, 0x31, 0x9e, 0x0c, 0xb1, 0xb1, 0x92, 0x17, 0x2a, 0x77, + 0x23, 0x0c, 0xcd, 0x21, 0xd5, 0xd7, 0x22, 0x9a, 0x52, 0xe1, 0x5e, 0xdb, 0x70, 0x0f, 0xb1, 0x19, + 0x4a, 0x43, 0xaa, 0x3b, 0x9d, 0xcc, 0x4e, 0x9a, 0xb2, 0x80, 0x20, 0x0f, 0x76, 0x83, 0x61, 0x77, + 0xa2, 0xdc, 0x9b, 0xfc, 0x73, 0x5a, 0x63, 0x83, 0x21, 0xd5, 0xd8, 0xfc, 0xb9, 0x8a, 0xe4, 0x9c, + 0x2f, 0xc8, 0xee, 0xcc, 0xe0, 0xd0, 0x1f, 0x70, 0x70, 0x67, 0x46, 0x45, 0xa5, 0x50, 0x23, 0x4b, + 0xfd, 0xa6, 0xfa, 0x31, 0xbf, 0x07, 0xa9, 0x7c, 0xd1, 0x07, 0xa2, 0xee, 0x42, 0x77, 0xb9, 0x60, + 0xa5, 0x71, 0xfe, 0xb2, 0x82, 0xa7, 0xa6, 0x4e, 0xd6, 0x95, 0xf5, 0x5d, 0x09, 0x28, 0xbb, 0x28, + 0x53, 0x2b, 0x3d, 0xe8, 0x12, 0x1c, 0x4b, 0xc9, 0x9b, 0xf7, 0x6f, 0xab, 0xf4, 0xf9, 0x7b, 0x4a, + 0xb5, 0x0e, 0x76, 0x44, 0x89, 0x3f, 0xfd, 0x16, 0x5e, 0x0d, 0x23, 0xcd, 0xe7, 0x79, 0x0d, 0x8a, + 0xa1, 0x77, 0x08, 0x0d, 0x3e, 0x5b, 0xba, 0x3b, 0xad, 0x9d, 0xb3, 0x3d, 0x62, 0xb6, 0xa7, 0x7b, + 0xe0, 0x8c, 0x52, 0x4d, 0x98, 0x8a, 0x23, 0xa9, 0x98, 0x39, 0x06, 0x4c, 0xac, 0x8f, 0x87, 0xb0, + 0x4f, 0x98, 0x5a, 0xc9, 0x70, 0x6d, 0x99, 0xc3, 0x51, 0x55, 0x58, 0x15, 0x0e, 0xe4, 0xc2, 0xc7, + 0x92, 0x2d, 0xf5, 0x60, 0x2d, 0x5f, 0x1c, 0x51, 0x1b, 0x1a, 0x72, 0x30, 0x77, 0x3f, 0xb2, 0x45, + 0xfc, 0x6a, 0xeb, 0xa8, 0xac, 0xc5, 0x6a, 0xd0, 0x3e, 0x87, 0x4f, 0xcc, 0xb4, 0xbe, 0xba, 0x37, + 0xdd, 0x3b, 0x66, 0xc9, 0x3b, 0x1e, 0x32, 0x74, 0x0e, 0x8d, 0x51, 0xaa, 0xd1, 0x21, 0x36, 0xbf, + 0x2a, 0x2a, 0xc3, 0xfc, 0x38, 0xb3, 0x54, 0x2e, 0x62, 0xa0, 0x01, 0x13, 0xff, 0x0b, 0xad, 0x5c, + 0xd2, 0x0f, 0xb2, 0x57, 0x65, 0x17, 0x2d, 0x5e, 0xe5, 0xc1, 0xf3, 0xcc, 0xb0, 0x45, 0xe2, 0x95, + 0xb5, 0xd4, 0x13, 0xe3, 0x4f, 0xea, 0x19, 0x2f, 0x74, 0x2e, 0xa1, 0x71, 0xcd, 0x34, 0x72, 0x2d, + 0x65, 0x4b, 0x49, 0x8e, 0x8f, 0x37, 0x3c, 0xeb, 0x9c, 0xfe, 0xb8, 0x33, 0x7d, 0x6e, 0xff, 0x79, + 0xb5, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x10, 0xbc, 0x66, 0xca, 0x48, 0x09, 0x00, 0x00, } From abe80ed7719a4ccf2407cea48303a017b90c3278 Mon Sep 17 
00:00:00 2001 From: AndrejKilvady Date: Wed, 18 Jul 2018 15:50:16 +0200 Subject: [PATCH 014/174] Fix ipv6 BD test2 Signed-off-by: AndrejKilvady --- .../bridge_domain/test2_bridge_domainIPv6.robot | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tests/robot/suites/trafficIPv6/bridge_domain/test2_bridge_domainIPv6.robot b/tests/robot/suites/trafficIPv6/bridge_domain/test2_bridge_domainIPv6.robot index b961c83be5..0d31e37600 100644 --- a/tests/robot/suites/trafficIPv6/bridge_domain/test2_bridge_domainIPv6.robot +++ b/tests/robot/suites/trafficIPv6/bridge_domain/test2_bridge_domainIPv6.robot @@ -51,11 +51,10 @@ Start Agents Sleep ${SYNC_SLEEP} Check Created Interfaces - vat_term: Check Loopback Interface State agent_vpp_1 bvi_loop0 enabled=1 mac=${MAC_LOOP1} ipv6=${IP_1}/${PREFIX} - vat_term: Check Loopback Interface State agent_vpp_2 bvi_loop0 enabled=1 mac=${MAC_LOOP2} ipv6=${IP_2}/${PREFIX} - vat_term: Check Memif Interface State agent_vpp_1 memif0 mac=${MAC_MEMIF1} role=master id=1 connected=1 enabled=1 socket=${AGENT_VPP_1_MEMIF_SOCKET_FOLDER}/m1.sock - vat_term: Check Memif Interface State agent_vpp_2 memif0 mac=${MAC_MEMIF2} role=slave id=1 connected=1 enabled=1 socket=${AGENT_VPP_1_MEMIF_SOCKET_FOLDER}/m1.sock - + vat_term: Check Loopback Interface State agent_vpp_1 bvi_loop0 enabled=1 mac=${MAC_LOOP1} ipv6=${IP_4}/${PREFIX} + vat_term: Check Loopback Interface State agent_vpp_2 bvi_loop0 enabled=1 mac=${MAC_LOOP2} ipv6=${IP_5}/${PREFIX} + vat_term: Check Memif Interface State agent_vpp_1 memif0 mac=${MAC_MEMIF1} role=master id=1 connected=1 enabled=1 socket=${AGENT_VPP_1_MEMIF_SOCKET_FOLDER}/m0.sock + vat_term: Check Memif Interface State agent_vpp_2 memif0 mac=${MAC_MEMIF2} role=slave id=1 connected=1 enabled=1 socket=${AGENT_VPP_1_MEMIF_SOCKET_FOLDER}/m0.sock Pinging Ping6 From agent_vpp_1 To ${IP_2} From e2e0e27eff3fedcf961cd3264816d2730573da72 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 18 Jul 2018 17:22:34 +0200 Subject: [PATCH 015/174] arp dump + rest support Signed-off-by: Vladimir Lavor --- plugins/rest/plugin_impl_rest.go | 2 + plugins/rest/rest_handlers.go | 27 +++++++++ plugins/vpp/l3plugin/arp_config.go | 6 +- plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go | 8 ++- plugins/vpp/l3plugin/vppdump/dump_vppcalls.go | 55 +++++++++++++++++++ 5 files changed, 91 insertions(+), 7 deletions(-) diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 9480c66ade..52fd42df19 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -52,6 +52,7 @@ func (plugin *Plugin) Init() (err error) { {Name: "Bridge domains", Path: "/bridgedomains"}, {Name: "L2Fibs", Path: "/l2fibs"}, {Name: "XConnectorPairs", Path: "/xconnectpairs"}, + {Name: "ARPs", Path: "/arps"}, {Name: "Static routes", Path: "/staticroutes"}, {Name: "ACL IP", Path: "/acl/ip"}, {Name: "Telemetry", Path: "/telemetry"}, @@ -68,6 +69,7 @@ func (plugin *Plugin) AfterInit() (err error) { plugin.HTTPHandlers.RegisterHTTPHandler("/bridgedomainids", plugin.bridgeDomainIdsGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/l2fibs", plugin.fibTableEntriesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/xconnectpairs", plugin.xconnectPairsGetHandler, "GET") + plugin.HTTPHandlers.RegisterHTTPHandler("/arps", plugin.arpGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/staticroutes", plugin.staticRoutesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler(fmt.Sprintf("/acl/interface/{%s:[0-9]+}", 
swIndexVarName), plugin.interfaceACLGetHandler, "GET") diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 45b7b7ad2a..c7abb45feb 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -171,6 +171,33 @@ func (plugin *Plugin) xconnectPairsGetHandler(formatter *render.Render) http.Han } } +// arpGetHandler - used to get the list of all ARP entries +func (plugin *Plugin) arpGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + + plugin.Log.Debug("Getting list of all ARPs") + + // create an API channel + ch, err := plugin.GoVppmux.NewAPIChannel() + if err != nil { + plugin.Log.Errorf("Error creating channel: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + defer ch.Close() + + res, err := l3plugin.DumpArps(plugin.Log, ch, nil) + if err != nil { + plugin.Log.Errorf("Error: %v", err) + formatter.JSON(w, http.StatusInternalServerError, nil) + return + } + + plugin.Log.Debug(res) + formatter.JSON(w, http.StatusOK, res) + } +} + // staticRoutesGetHandler - used to get list of all static routes func (plugin *Plugin) staticRoutesGetHandler(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { diff --git a/plugins/vpp/l3plugin/arp_config.go b/plugins/vpp/l3plugin/arp_config.go index 2164a1d764..4b5b5346fe 100644 --- a/plugins/vpp/l3plugin/arp_config.go +++ b/plugins/vpp/l3plugin/arp_config.go @@ -319,14 +319,10 @@ func isValidARP(arpInput *l3.ArpTable_ArpEntry, log logging.Logger) bool { // transformArp converts raw entry data to ARP object func transformArp(arpInput *l3.ArpTable_ArpEntry, ifIndex uint32) (*vppcalls.ArpEntry, error) { ipAddr := net.ParseIP(arpInput.IpAddress) - macAddr, err := net.ParseMAC(arpInput.PhysAddress) - if err != nil { - return nil, err - } arp := &vppcalls.ArpEntry{ Interface: ifIndex, IPAddress: ipAddr, - MacAddress: macAddr, + MacAddress: arpInput.PhysAddress, Static: arpInput.Static, } return arp, nil diff --git a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go index 60854be54d..e8c3c5abd2 100644 --- a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go @@ -35,7 +35,7 @@ var ArpMessages = []govppapi.Message{ type ArpEntry struct { Interface uint32 IPAddress net.IP - MacAddress net.HardwareAddr + MacAddress string Static bool } @@ -68,7 +68,11 @@ func vppAddDelArp(entry *ArpEntry, vppChan govppapi.Channel, delete bool, stopwa } else { req.IsStatic = 0 } - req.MacAddress = []byte(entry.MacAddress) + macAddr, err := net.ParseMAC(entry.MacAddress) + if err != nil { + return err + } + req.MacAddress = []byte(macAddr) req.IsNoAdjFib = 1 req.SwIfIndex = entry.Interface diff --git a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go index 85689fa17a..3e4935406b 100644 --- a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go +++ b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go @@ -128,3 +128,58 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, return rt, nil } + +// DumpArps dumps the ARP entries configured in VPP and returns them as a list.
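+// The dump request is sent with SwIfIndex set to 0xffffffff, so neighbour
+// entries are collected from all interfaces; both IPv4 and IPv6 entries
+// (static as well as dynamic) are returned.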
+func DumpArps(log logging.Logger, vppChan govppapi.Channel, timeLog measure.StopWatchEntry) ([]*vppcalls.ArpEntry, error) { + // DumpArps time measurement + start := time.Now() + defer func() { + if timeLog != nil { + timeLog.LogTimeEntry(time.Since(start)) + } + }() + + var arps []*vppcalls.ArpEntry + + // Dump ARPs. + reqCtx := vppChan.SendMultiRequest(&l3ba.IPNeighborDump{ + SwIfIndex: 0xffffffff, + }) + for { + arpDetails := &l3ba.IPNeighborDetails{} + stop, err := reqCtx.ReceiveReply(arpDetails) + if stop { + break + } + if err != nil { + log.Error(err) + return nil, err + } + + var mac net.HardwareAddr = arpDetails.MacAddress + arp := &vppcalls.ArpEntry{ + Interface: arpDetails.SwIfIndex, + MacAddress: mac.String(), + Static: uintToBool(arpDetails.IsStatic), + } + + var address net.IP + if arpDetails.IsIpv6 == 1 { + address = net.IP(arpDetails.IPAddress).To16() + } else { + address = net.IP(arpDetails.IPAddress[:4]).To4() + } + arp.IPAddress = address + + arps = append(arps, arp) + } + + return arps, nil +} + +func uintToBool(value uint8) bool { + if value == 0 { + return false + } + return true +} From bc6040ea00fbeb111f1e7eb74b50e4babde95725 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 18 Jul 2018 18:00:25 +0200 Subject: [PATCH 016/174] fix tests Signed-off-by: Vladimir Lavor --- plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go index b8669ec9b0..d2ec55799f 100644 --- a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go +++ b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go @@ -26,19 +26,19 @@ var arpEntries = []vppcalls.ArpEntry{ { Interface: 1, IPAddress: []byte{192, 168, 10, 21}, - MacAddress: []byte{0x59, 0x6C, 0xde, 0xad, 0x00, 0x01}, + MacAddress: "59:6C:45:59:8E:BD", Static: true, }, { Interface: 1, IPAddress: []byte{192, 168, 10, 22}, - MacAddress: []byte{0x59, 0x6C, 0xde, 0xad, 0x00, 0x02}, + MacAddress: "6C:45:59:59:8E:BD", Static: false, }, { Interface: 1, IPAddress: []byte{0xde, 0xad, 0, 0, 0, 0, 0, 0, 0xde, 0xad, 0, 0, 0, 0, 0, 1}, - MacAddress: []byte{0x59, 0x6C, 0xde, 0xad, 0x00, 0x02}, + MacAddress: "8E:BD:6C:45:59:59", Static: false, }, } From 54a94288765c54b64fbe091181cb4f27a3a318e8 Mon Sep 17 00:00:00 2001 From: Rastislav Szabo Date: Thu, 19 Jul 2018 09:52:58 +0200 Subject: [PATCH 017/174] Introduce route types Signed-off-by: Rastislav Szabo --- plugins/vpp/l3plugin/data_resync.go | 20 +-- plugins/vpp/l3plugin/route_utils.go | 40 +++-- .../vpp/l3plugin/vppcalls/route_vppcalls.go | 42 +++--- plugins/vpp/l3plugin/vppdump/dump_vppcalls.go | 23 ++- plugins/vpp/model/l3/l3.pb.go | 140 +++++++++++------- plugins/vpp/model/l3/l3.proto | 29 ++-- 6 files changed, 169 insertions(+), 125 deletions(-) diff --git a/plugins/vpp/l3plugin/data_resync.go b/plugins/vpp/l3plugin/data_resync.go index b928adb951..60393e2a86 100644 --- a/plugins/vpp/l3plugin/data_resync.go +++ b/plugins/vpp/l3plugin/data_resync.go @@ -48,8 +48,8 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error nbRouteID := routeIdentifier(nbRoute.VrfId, nbRoute.DstIpAddr, nbRoute.NextHopAddr) nbIfIdx, _, found := plugin.ifIndexes.LookupIdx(nbRoute.OutgoingInterface) if !found { - if isVrfLookupRoute(nbRoute) { - // expected by VRF lookup route + if nbRoute.Type == l3.StaticRoutes_Route_INTER_VRF { + // expected for inter-VRF routes nbIfIdx = vppcalls.NextHopOutgoingIfUnset } else {
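+ // the outgoing interface is not known yet; cache the route so it can be configured once the interface appears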
plugin.log.Debugf("RESYNC routes: outgoing interface not found for %s", nbRouteID) @@ -66,6 +66,11 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error for _, vppRoute := range vppRoutes { vppRouteID := routeIdentifier(vppRoute.VrfID, vppRoute.DstAddr.String(), vppRoute.NextHopAddr.String()) plugin.log.Debugf("RESYNC routes: comparing %s and %s", nbRouteID, vppRouteID) + if int32(vppRoute.Type) != int32(nbRoute.Type) { + plugin.log.Debugf("RESYNC routes: route type is different (NB: %d, VPP %d)", + nbRoute.Type, vppRoute.Type) + continue + } if vppRoute.OutIface != nbIfIdx { plugin.log.Debugf("RESYNC routes: interface index is different (NB: %d, VPP %d)", nbIfIdx, vppRoute.OutIface) @@ -101,14 +106,9 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error continue } } - if vppRoute.NextHopVrfId != nbRoute.NextHopVrfId { - plugin.log.Debugf("RESYNC routes: next hop VRF ID is different (NB: %d, VPP %d)", - nbRoute.NextHopVrfId, vppRoute.NextHopVrfId) - continue - } - if vppRoute.LookupVrfID != nbRoute.LookupVrfId { - plugin.log.Debugf("RESYNC routes: Lookup VRF ID is different (NB: %d, VPP %d)", - nbRoute.LookupVrfId, vppRoute.LookupVrfID) + if vppRoute.ViaVrfId != nbRoute.ViaVrfId { + plugin.log.Debugf("RESYNC routes: via VRF ID is different (NB: %d, VPP %d)", + nbRoute.ViaVrfId, vppRoute.ViaVrfId) continue } // Register existing routes diff --git a/plugins/vpp/l3plugin/route_utils.go b/plugins/vpp/l3plugin/route_utils.go index 9f77fabd83..9e121f6c1d 100644 --- a/plugins/vpp/l3plugin/route_utils.go +++ b/plugins/vpp/l3plugin/route_utils.go @@ -50,18 +50,21 @@ func (arr SortedRoutes) Less(i, j int) bool { } func eqRoutes(a *vppcalls.Route, b *vppcalls.Route) bool { - return a.VrfID == b.VrfID && + return a.Type == b.Type && + a.VrfID == b.VrfID && bytes.Equal(a.DstAddr.IP, b.DstAddr.IP) && bytes.Equal(a.DstAddr.Mask, b.DstAddr.Mask) && bytes.Equal(a.NextHopAddr, b.NextHopAddr) && - a.NextHopVrfId == b.VrfID && + a.ViaVrfId == b.ViaVrfId && a.OutIface == b.OutIface && - a.LookupVrfID == b.LookupVrfID && a.Weight == b.Weight && a.Preference == b.Preference } func lessRoute(a *vppcalls.Route, b *vppcalls.Route) bool { + if a.Type != b.Type { + return a.Type < b.Type + } if a.VrfID != b.VrfID { return a.VrfID < b.VrfID } @@ -74,15 +77,12 @@ func lessRoute(a *vppcalls.Route, b *vppcalls.Route) bool { if !bytes.Equal(a.NextHopAddr, b.NextHopAddr) { return bytes.Compare(a.NextHopAddr, b.NextHopAddr) < 0 } - if a.NextHopVrfId != b.NextHopVrfId { - return a.NextHopVrfId < b.NextHopVrfId + if a.ViaVrfId != b.ViaVrfId { + return a.ViaVrfId < b.ViaVrfId } if a.OutIface != b.OutIface { return a.OutIface < b.OutIface } - if a.LookupVrfID != b.LookupVrfID { - return a.LookupVrfID < b.LookupVrfID - } if a.Preference != b.Preference { return a.Preference < b.Preference } @@ -97,8 +97,8 @@ func TransformRoute(routeInput *l3.StaticRoutes_Route, swIndex uint32, log loggi return nil, nil } if routeInput.DstIpAddr == "" { - if !isVrfLookupRoute(routeInput) { - // no destination address is only allowed for VRF lookup route + if routeInput.Type != l3.StaticRoutes_Route_INTER_VRF { + // no destination address is only allowed for inter-VRF routes log.Infof("Route does not contain destination address") return nil, nil } @@ -116,14 +116,14 @@ func TransformRoute(routeInput *l3.StaticRoutes_Route, swIndex uint32, log loggi nextHopIP = nextHopIP.To4() } route := &vppcalls.Route{ - VrfID: vrfID, - DstAddr: *parsedDestIP, - NextHopAddr: nextHopIP, - NextHopVrfId: 
routeInput.NextHopVrfId, - OutIface: swIndex, - LookupVrfID: routeInput.LookupVrfId, - Weight: routeInput.Weight, - Preference: routeInput.Preference, + Type: vppcalls.RouteType(routeInput.Type), + VrfID: vrfID, + DstAddr: *parsedDestIP, + NextHopAddr: nextHopIP, + ViaVrfId: routeInput.ViaVrfId, + OutIface: swIndex, + Weight: routeInput.Weight, + Preference: routeInput.Preference, } return route, nil } @@ -173,7 +173,3 @@ func (plugin *RouteConfigurator) diffRoutes(new []*vppcalls.Route, old []*vppcal } return } - -func isVrfLookupRoute(r *l3.StaticRoutes_Route) bool { - return r.OutgoingInterface == "" && r.VrfId != r.LookupVrfId -} diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go index 23b08c2890..abe3a59c20 100644 --- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go @@ -35,18 +35,27 @@ var RouteMessages = []govppapi.Message{ &ip.IP6FibDetails{}, } +type RouteType int32 + +const ( + // IntraVrf route forwards in the specified vrf_id only + IntraVrf RouteType = iota + // InterVrf route forwards using the lookup in the via_vrf_id + InterVrf +) + // Route represents a forward IP route entry with the parameters of gateway // to which packets should be forwarded when a given routing table entry is applied. type Route struct { - VrfID uint32 `json:"vrf_id"` - TableName string `json:"table_name"` - DstAddr net.IPNet `json:"dst_addr"` - NextHopAddr net.IP `json:"next_hop_addr"` - NextHopVrfId uint32 `json:"next_hop_vrf_id"` - OutIface uint32 `json:"out_iface"` - LookupVrfID uint32 `json:"lookup_vrf_id"` - Weight uint32 `json:"weight"` - Preference uint32 `json:"preference"` + Type RouteType `json:"type"` + VrfID uint32 `json:"vrf_id"` + TableName string `json:"table_name"` + DstAddr net.IPNet `json:"dst_addr"` + NextHopAddr net.IP `json:"next_hop_addr"` + OutIface uint32 `json:"out_iface"` + ViaVrfId uint32 `json:"via_vrf_id"` + Weight uint32 `json:"weight"` + Preference uint32 `json:"preference"` } const ( @@ -103,11 +112,10 @@ func vppAddDelRoute(route *Route, vppChan govppapi.Channel, delete bool, stopwat // VRF req.TableID = route.VrfID - if isVrfLookupRoute(route) { - // next hop not specified = VRF lookup - req.NextHopTableID = route.LookupVrfID + if route.Type == InterVrf { + req.NextHopTableID = route.ViaVrfId } else { - req.NextHopTableID = route.NextHopVrfId + req.NextHopTableID = route.VrfID } // Multi path is always true @@ -130,8 +138,8 @@ func VppAddRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stop if err := ifvppcalls.CreateVrfIfNeeded(route.VrfID, vppChan); err != nil { return err } - if isVrfLookupRoute(route) { - if err := ifvppcalls.CreateVrfIfNeeded(route.LookupVrfID, vppChan); err != nil { + if route.Type == InterVrf { + if err := ifvppcalls.CreateVrfIfNeeded(route.ViaVrfId, vppChan); err != nil { return err } } @@ -142,7 +150,3 @@ func VppAddRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stop func VppDelRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { return vppAddDelRoute(route, vppChan, true, stopwatch) } - -func isVrfLookupRoute(r *Route) bool { - return r.OutIface == NextHopOutgoingIfUnset && r.VrfID != r.LookupVrfID -} diff --git a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go index 54baabc289..ecd7f94df6 100644 --- a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go +++ b/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go @@ -53,6 +53,10 @@ 
func DumpStaticRoutes(log logging.Logger, vppChan govppapi.Channel, timeLog meas log.Error(err) return nil, err } + if len(fibDetails.Path) > 0 && fibDetails.Path[0].IsDrop == 1 { + // skip drop routes, not supported by vpp-agent + continue + } ipv4Route, err := dumpStaticRouteIPv4Details(fibDetails) if err != nil { return nil, err @@ -72,6 +76,10 @@ func DumpStaticRoutes(log logging.Logger, vppChan govppapi.Channel, timeLog meas log.Error(err) return nil, err } + if len(fibDetails.Path) > 0 && fibDetails.Path[0].IsDrop == 1 { + // skip drop routes, not supported by vpp-agent + continue + } ipv6Route, err := dumpStaticRouteIPv6Details(fibDetails) if err != nil { return nil, err @@ -100,7 +108,9 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, ipAddr = fmt.Sprintf("%s/%d", net.IP(address[:4]).To4().String(), uint32(prefixLen)) } - rt := &vppcalls.Route{} + rt := &vppcalls.Route{ + Type: vppcalls.IntraVrf, // default + } // IP net parsedIP, _, err := addrs.ParseIPWithPrefix(ipAddr) @@ -113,6 +123,8 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, rt.DstAddr = *parsedIP if len(path) > 0 { + // TODO: if len(path) > 1, it means multiple NB routes (load-balancing) - not implemented properly + var nextHopAddr net.IP if ipv6 { nextHopAddr = net.IP(path[0].NextHop).To16() @@ -122,11 +134,10 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, rt.NextHopAddr = nextHopAddr - if nextHopAddr.IsUnspecified() && path[0].SwIfIndex == vppcalls.NextHopOutgoingIfUnset { - // next hop IP nor outgoing interface is specified = VRF lookup route - rt.LookupVrfID = path[0].TableID - } else { - rt.NextHopVrfId = path[0].TableID + if path[0].SwIfIndex == vppcalls.NextHopOutgoingIfUnset && path[0].TableID != tableID { + // outgoing interface not specified and path table id not equal to route table id = inter-VRF route + rt.Type = vppcalls.InterVrf + rt.ViaVrfId = path[0].TableID } rt.OutIface = path[0].SwIfIndex diff --git a/plugins/vpp/model/l3/l3.pb.go b/plugins/vpp/model/l3/l3.pb.go index 840b0f3af1..aeb07f404d 100644 --- a/plugins/vpp/model/l3/l3.pb.go +++ b/plugins/vpp/model/l3/l3.pb.go @@ -31,6 +31,29 @@ var _ = math.Inf // proto package needs to be updated. 
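An illustrative sketch (not part of this patch series) of a northbound inter-VRF route expressed with the RouteType enum generated below; the concrete field values are hypothetical:

	// Example: a route installed into VRF 0 whose forwarding lookup is delegated to VRF 1.
	route := &l3.StaticRoutes_Route{
		Type:      l3.StaticRoutes_Route_INTER_VRF, // hypothetical example values
		VrfId:     0,
		ViaVrfId:  1,
		DstIpAddr: "10.1.1.0/24",
	}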
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +type StaticRoutes_Route_RouteType int32 + +const ( + StaticRoutes_Route_INTRA_VRF StaticRoutes_Route_RouteType = 0 + StaticRoutes_Route_INTER_VRF StaticRoutes_Route_RouteType = 1 +) + +var StaticRoutes_Route_RouteType_name = map[int32]string{ + 0: "INTRA_VRF", + 1: "INTER_VRF", +} +var StaticRoutes_Route_RouteType_value = map[string]int32{ + "INTRA_VRF": 0, + "INTER_VRF": 1, +} + +func (x StaticRoutes_Route_RouteType) String() string { + return proto.EnumName(StaticRoutes_Route_RouteType_name, int32(x)) +} +func (StaticRoutes_Route_RouteType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorL3, []int{0, 0, 0} +} + // Static routes type StaticRoutes struct { Routes []*StaticRoutes_Route `protobuf:"bytes,1,rep,name=routes" json:"routes,omitempty"` @@ -49,15 +72,15 @@ func (m *StaticRoutes) GetRoutes() []*StaticRoutes_Route { } type StaticRoutes_Route struct { - VrfId uint32 `protobuf:"varint,1,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - DstIpAddr string `protobuf:"bytes,3,opt,name=dst_ip_addr,json=dstIpAddr,proto3" json:"dst_ip_addr,omitempty"` - NextHopAddr string `protobuf:"bytes,4,opt,name=next_hop_addr,json=nextHopAddr,proto3" json:"next_hop_addr,omitempty"` - NextHopVrfId uint32 `protobuf:"varint,5,opt,name=next_hop_vrf_id,json=nextHopVrfId,proto3" json:"next_hop_vrf_id,omitempty"` - OutgoingInterface string `protobuf:"bytes,6,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` - LookupVrfId uint32 `protobuf:"varint,7,opt,name=lookup_vrf_id,json=lookupVrfId,proto3" json:"lookup_vrf_id,omitempty"` - Weight uint32 `protobuf:"varint,8,opt,name=weight,proto3" json:"weight,omitempty"` - Preference uint32 `protobuf:"varint,9,opt,name=preference,proto3" json:"preference,omitempty"` + Type StaticRoutes_Route_RouteType `protobuf:"varint,1,opt,name=type,proto3,enum=l3.StaticRoutes_Route_RouteType" json:"type,omitempty"` + VrfId uint32 `protobuf:"varint,2,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + DstIpAddr string `protobuf:"bytes,4,opt,name=dst_ip_addr,json=dstIpAddr,proto3" json:"dst_ip_addr,omitempty"` + NextHopAddr string `protobuf:"bytes,5,opt,name=next_hop_addr,json=nextHopAddr,proto3" json:"next_hop_addr,omitempty"` + ViaVrfId uint32 `protobuf:"varint,6,opt,name=via_vrf_id,json=viaVrfId,proto3" json:"via_vrf_id,omitempty"` + OutgoingInterface string `protobuf:"bytes,7,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` + Weight uint32 `protobuf:"varint,9,opt,name=weight,proto3" json:"weight,omitempty"` + Preference uint32 `protobuf:"varint,10,opt,name=preference,proto3" json:"preference,omitempty"` } func (m *StaticRoutes_Route) Reset() { *m = StaticRoutes_Route{} } @@ -65,6 +88,13 @@ func (m *StaticRoutes_Route) String() string { return proto.CompactTe func (*StaticRoutes_Route) ProtoMessage() {} func (*StaticRoutes_Route) Descriptor() ([]byte, []int) { return fileDescriptorL3, []int{0, 0} } +func (m *StaticRoutes_Route) GetType() StaticRoutes_Route_RouteType { + if m != nil { + return m.Type + } + return StaticRoutes_Route_INTRA_VRF +} + func (m *StaticRoutes_Route) GetVrfId() uint32 { if m != nil { return m.VrfId @@ -93,9 +123,9 @@ func (m *StaticRoutes_Route) GetNextHopAddr() string 
{ return "" } -func (m *StaticRoutes_Route) GetNextHopVrfId() uint32 { +func (m *StaticRoutes_Route) GetViaVrfId() uint32 { if m != nil { - return m.NextHopVrfId + return m.ViaVrfId } return 0 } @@ -107,13 +137,6 @@ func (m *StaticRoutes_Route) GetOutgoingInterface() string { return "" } -func (m *StaticRoutes_Route) GetLookupVrfId() uint32 { - if m != nil { - return m.LookupVrfId - } - return 0 -} - func (m *StaticRoutes_Route) GetWeight() uint32 { if m != nil { return m.Weight @@ -371,48 +394,51 @@ func init() { proto.RegisterType((*ProxyArpInterfaces_InterfaceList_Interface)(nil), "l3.ProxyArpInterfaces.InterfaceList.Interface") proto.RegisterType((*STNTable)(nil), "l3.STNTable") proto.RegisterType((*STNTable_STNTableEntry)(nil), "l3.STNTable.STNTableEntry") + proto.RegisterEnum("l3.StaticRoutes_Route_RouteType", StaticRoutes_Route_RouteType_name, StaticRoutes_Route_RouteType_value) } func init() { proto.RegisterFile("l3.proto", fileDescriptorL3) } var fileDescriptorL3 = []byte{ - // 601 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xd5, 0xa4, 0x8d, 0x6b, 0x5f, 0x27, 0xad, 0xbe, 0xd1, 0xd7, 0x60, 0xac, 0x52, 0x82, 0x05, - 0x52, 0x36, 0x78, 0x41, 0x24, 0x16, 0x54, 0x2c, 0xb2, 0x40, 0x22, 0x52, 0xa9, 0x90, 0x5b, 0xb1, - 0xb5, 0x9c, 0x78, 0x92, 0x8c, 0x30, 0xb6, 0x35, 0x33, 0x29, 0xcd, 0x96, 0x77, 0x80, 0x05, 0x0f, - 0xc2, 0x2b, 0xf0, 0x32, 0xec, 0xd9, 0xb0, 0x40, 0xbe, 0x33, 0x76, 0x9c, 0x48, 0x20, 0x56, 0xbe, - 0x3f, 0x67, 0xce, 0xdc, 0x7b, 0xe6, 0xc8, 0x60, 0x67, 0xe3, 0xb0, 0x14, 0x85, 0x2a, 0x68, 0x27, - 0x1b, 0x07, 0x3f, 0x3b, 0xd0, 0xbb, 0x56, 0x89, 0xe2, 0xf3, 0xa8, 0x58, 0x2b, 0x26, 0x69, 0x08, - 0x96, 0xc0, 0xc8, 0x23, 0xc3, 0x83, 0x91, 0xfb, 0x6c, 0x10, 0x66, 0xe3, 0xb0, 0x8d, 0x08, 0xf1, - 0x13, 0x19, 0x94, 0xff, 0xad, 0x03, 0x5d, 0xac, 0xd0, 0x53, 0xb0, 0x6e, 0xc5, 0x22, 0xe6, 0xa9, - 0x47, 0x86, 0x64, 0xd4, 0x8f, 0xba, 0xb7, 0x62, 0x31, 0x4d, 0xe9, 0x10, 0xdc, 0x94, 0xc9, 0xb9, - 0xe0, 0xa5, 0xe2, 0x45, 0xee, 0x75, 0x86, 0x64, 0xe4, 0x44, 0xed, 0x12, 0x3d, 0x07, 0x37, 0x95, - 0x2a, 0xe6, 0x65, 0x9c, 0xa4, 0xa9, 0xf0, 0x0e, 0x10, 0xe1, 0xa4, 0x52, 0x4d, 0xcb, 0x49, 0x9a, - 0x0a, 0x1a, 0x40, 0x3f, 0x67, 0x77, 0x2a, 0x5e, 0x15, 0x06, 0x71, 0xa8, 0x39, 0xaa, 0xe2, 0xeb, - 0x42, 0x63, 0x9e, 0xc0, 0x49, 0x83, 0x31, 0x53, 0x74, 0x71, 0x8a, 0x9e, 0x41, 0xbd, 0xc3, 0x61, - 0x9e, 0x02, 0x2d, 0xd6, 0x6a, 0x59, 0xf0, 0x7c, 0x19, 0xf3, 0x5c, 0x31, 0xb1, 0x48, 0xe6, 0xcc, - 0xb3, 0x90, 0xef, 0xbf, 0xba, 0x33, 0xad, 0x1b, 0xd5, 0xcd, 0x59, 0x51, 0xbc, 0x5f, 0x37, 0x9c, - 0x47, 0xc8, 0xe9, 0xea, 0xa2, 0xa6, 0x1c, 0x80, 0xf5, 0x91, 0xf1, 0xe5, 0x4a, 0x79, 0x36, 0x36, - 0x4d, 0x46, 0xcf, 0x01, 0x4a, 0xc1, 0x16, 0x4c, 0xb0, 0x7c, 0xce, 0x3c, 0x07, 0x7b, 0xad, 0x4a, - 0xf0, 0x9d, 0x80, 0x3d, 0x11, 0xe5, 0x4d, 0x32, 0xcb, 0x18, 0x7d, 0x0e, 0x6e, 0x22, 0xca, 0x98, - 0xe5, 0x4a, 0xf0, 0x46, 0xfa, 0xd3, 0x4a, 0xfa, 0x1a, 0x52, 0x05, 0xaf, 0x72, 0x25, 0x36, 0x11, - 0x24, 0x3a, 0xe2, 0x4c, 0xfa, 0x9f, 0x34, 0x09, 0x36, 0xe8, 0x19, 0x38, 0xdb, 0x9d, 0x88, 0x56, - 0xb1, 0x29, 0xd0, 0x07, 0x00, 0x46, 0x61, 0x26, 0xa5, 0x79, 0x06, 0x87, 0xa3, 0x7a, 0x4c, 0x4a, - 0xfa, 0x08, 0x7a, 0xe5, 0x6a, 0x23, 0x1b, 0x80, 0x7e, 0x05, 0xb7, 0xaa, 0xd5, 0x90, 0x01, 0x58, - 0x12, 0x8d, 0x80, 0x0f, 0x60, 0x47, 0x26, 0x0b, 0x7e, 0x10, 0x38, 0x7e, 0x2b, 0x8a, 0xbb, 0xcd, - 0x44, 0x94, 0x51, 0x92, 0x2f, 0x99, 0xa4, 0x2f, 0xc1, 0x15, 0x55, 0x14, 0x67, 0x5c, 0xaa, 0x7a, - 0x9f, 0xb3, 0x6a, 0x9f, 0x5d, 0x60, 0x88, 0x9f, 0x4b, 
0x2e, 0x55, 0x04, 0xa2, 0x0e, 0xa5, 0xff, - 0x95, 0x80, 0xd3, 0x74, 0xe8, 0xff, 0xd0, 0xcd, 0x92, 0x19, 0xcb, 0xcc, 0x4e, 0x3a, 0xa1, 0x2f, - 0xc0, 0xc2, 0x13, 0xd5, 0x2e, 0x15, 0x7b, 0xf0, 0x37, 0x76, 0x1d, 0x45, 0xe6, 0x84, 0x7f, 0x01, - 0x5d, 0x2c, 0xd0, 0xfb, 0x60, 0x2f, 0xb8, 0x40, 0xf3, 0x19, 0xf6, 0x23, 0xcc, 0xa7, 0x25, 0xbd, - 0x07, 0x47, 0x59, 0xa2, 0x3b, 0x5a, 0x2c, 0xab, 0x4a, 0xa7, 0x65, 0xf0, 0x8b, 0x00, 0xad, 0xef, - 0x69, 0xac, 0x22, 0xe9, 0x1b, 0x38, 0x69, 0xc4, 0xde, 0x59, 0xfb, 0x71, 0x7b, 0xb0, 0xed, 0x81, - 0xb0, 0x09, 0x71, 0xfd, 0x63, 0xde, 0x4e, 0xa5, 0xff, 0x85, 0x40, 0x7f, 0x07, 0xf1, 0x07, 0x19, - 0xae, 0x00, 0x9a, 0x93, 0xb5, 0x14, 0xe1, 0xbf, 0xdc, 0xb8, 0xcd, 0xa2, 0x16, 0x83, 0xff, 0x10, - 0x9c, 0xad, 0xff, 0x29, 0x1c, 0xe6, 0xc9, 0x87, 0xda, 0x4c, 0x18, 0x07, 0x9f, 0x09, 0xd8, 0xd7, - 0x37, 0x57, 0xda, 0xb7, 0x17, 0xe0, 0x4a, 0x95, 0xef, 0xf9, 0xd6, 0xc7, 0x5f, 0x86, 0x81, 0x34, - 0x81, 0x31, 0xaf, 0x54, 0x79, 0x6d, 0xde, 0x4b, 0xe8, 0xef, 0x34, 0xf7, 0x2c, 0x4a, 0xf6, 0x2d, - 0xba, 0xe3, 0xef, 0xce, 0x9e, 0xbf, 0x67, 0x16, 0xfe, 0xd4, 0xc6, 0xbf, 0x03, 0x00, 0x00, 0xff, - 0xff, 0x37, 0xf9, 0x27, 0x79, 0xe0, 0x04, 0x00, 0x00, + // 635 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xfe, 0x37, 0x6d, 0xdc, 0x78, 0xd2, 0xf4, 0x87, 0x15, 0x2d, 0xc6, 0x2a, 0x25, 0x58, 0x1c, + 0xc2, 0x01, 0x1f, 0x1a, 0xc4, 0x81, 0x8a, 0x43, 0x0e, 0x45, 0x44, 0x2a, 0x15, 0xda, 0x46, 0xbd, + 0x5a, 0x6e, 0xbc, 0x49, 0x57, 0x32, 0xf6, 0x6a, 0x77, 0x1b, 0x9a, 0x2b, 0xef, 0x00, 0x07, 0x5e, + 0x81, 0x07, 0xe1, 0x5d, 0x10, 0x8f, 0xc0, 0x01, 0x79, 0xbc, 0x76, 0x9c, 0x08, 0x10, 0x97, 0x78, + 0xbe, 0x99, 0x6f, 0xc6, 0x3b, 0xdf, 0x7e, 0x31, 0x74, 0xd2, 0x61, 0x28, 0x55, 0x6e, 0x72, 0xda, + 0x4a, 0x87, 0xc1, 0xd7, 0x2d, 0xd8, 0xbd, 0x30, 0xb1, 0x11, 0x53, 0x96, 0xdf, 0x18, 0xae, 0x69, + 0x08, 0x8e, 0xc2, 0xc8, 0x23, 0xfd, 0xad, 0x41, 0xf7, 0xf8, 0x20, 0x4c, 0x87, 0x61, 0x93, 0x11, + 0xe2, 0x83, 0x59, 0x96, 0xff, 0xbd, 0x05, 0x6d, 0xcc, 0xd0, 0xe7, 0xb0, 0x6d, 0x96, 0x92, 0x7b, + 0xa4, 0x4f, 0x06, 0x7b, 0xc7, 0xfd, 0xdf, 0xf7, 0x95, 0xbf, 0x93, 0xa5, 0xe4, 0x0c, 0xd9, 0x74, + 0x1f, 0x9c, 0x85, 0x9a, 0x45, 0x22, 0xf1, 0x5a, 0x7d, 0x32, 0xe8, 0xb1, 0xf6, 0x42, 0xcd, 0xc6, + 0x09, 0xed, 0x43, 0x37, 0xe1, 0x7a, 0xaa, 0x84, 0x34, 0x22, 0xcf, 0xbc, 0xad, 0x3e, 0x19, 0xb8, + 0xac, 0x99, 0xa2, 0x47, 0xd0, 0x4d, 0xb4, 0x89, 0x84, 0x8c, 0xe2, 0x24, 0x51, 0xde, 0x36, 0x32, + 0xdc, 0x44, 0x9b, 0xb1, 0x1c, 0x25, 0x89, 0xa2, 0x01, 0xf4, 0x32, 0x7e, 0x6b, 0xa2, 0xeb, 0xdc, + 0x32, 0xda, 0xe5, 0x8c, 0x22, 0xf9, 0x26, 0x2f, 0x39, 0x87, 0x00, 0x0b, 0x11, 0x47, 0xf6, 0x00, + 0x0e, 0x1e, 0xa0, 0xb3, 0x10, 0xf1, 0x25, 0x9e, 0xe1, 0x19, 0xd0, 0xfc, 0xc6, 0xcc, 0x73, 0x91, + 0xcd, 0x23, 0x91, 0x19, 0xae, 0x66, 0xf1, 0x94, 0x7b, 0x3b, 0x38, 0xe6, 0x6e, 0x55, 0x19, 0x57, + 0x05, 0x7a, 0x00, 0xce, 0x07, 0x2e, 0xe6, 0xd7, 0xc6, 0x73, 0x71, 0x90, 0x45, 0xf4, 0x08, 0x40, + 0x2a, 0x3e, 0xe3, 0x8a, 0x67, 0x53, 0xee, 0x01, 0xd6, 0x1a, 0x99, 0xe0, 0x29, 0xb8, 0xb5, 0x28, + 0xb4, 0x07, 0xee, 0xf8, 0x7c, 0xc2, 0x46, 0xd1, 0x25, 0x7b, 0x7d, 0xe7, 0x3f, 0x0b, 0x4f, 0x19, + 0x42, 0x12, 0x7c, 0x23, 0xd0, 0x19, 0x29, 0x39, 0x89, 0xaf, 0x52, 0x4e, 0x5f, 0x40, 0x37, 0x56, + 0x32, 0xe2, 0x99, 0x51, 0xa2, 0xbe, 0xae, 0xfd, 0x42, 0xf6, 0x8a, 0x52, 0x04, 0xa7, 0x99, 0x51, + 0x4b, 0x06, 0x71, 0x19, 0x09, 0xae, 0xfd, 0x8f, 0xe5, 0x10, 0x2c, 0xd0, 0x43, 0x70, 0x57, 0xab, + 0x91, 0x52, 0xc3, 0x3a, 0x41, 0x1f, 0x02, 0x58, 0x7d, 0xb9, 0xd6, 0x78, 
0x41, 0x45, 0x19, 0xb5, + 0xe3, 0x5a, 0xd3, 0xc7, 0xb0, 0x2b, 0xaf, 0x97, 0xba, 0x26, 0xd8, 0x5b, 0x2a, 0x72, 0x15, 0xe5, + 0x00, 0x1c, 0x8d, 0x26, 0xc0, 0x0b, 0xea, 0x30, 0x8b, 0x82, 0x1f, 0x04, 0xf6, 0xde, 0xa9, 0xfc, + 0x76, 0x39, 0x52, 0x92, 0xc5, 0xd9, 0x9c, 0x6b, 0xfa, 0x0a, 0xba, 0xaa, 0x88, 0xa2, 0x54, 0x68, + 0x53, 0xed, 0x73, 0x58, 0xec, 0xb3, 0x4e, 0x0c, 0xf1, 0x71, 0x26, 0xb4, 0x61, 0xa0, 0xaa, 0x50, + 0xfb, 0x5f, 0x08, 0xb8, 0x75, 0x85, 0xde, 0x83, 0x76, 0x1a, 0x5f, 0xf1, 0xd4, 0xee, 0x54, 0x02, + 0xfa, 0x12, 0x1c, 0xec, 0x28, 0x76, 0x29, 0xa6, 0x07, 0x7f, 0x9b, 0x5e, 0x46, 0xcc, 0x76, 0xf8, + 0x27, 0xd0, 0xc6, 0x04, 0x7d, 0x00, 0x9d, 0x99, 0x50, 0x68, 0x3d, 0x3b, 0x7d, 0x07, 0xf1, 0x58, + 0xd2, 0xfb, 0xb0, 0x93, 0xc6, 0x65, 0xa5, 0x14, 0xcb, 0x29, 0xe0, 0x58, 0x06, 0x3f, 0x09, 0xd0, + 0xea, 0x3d, 0xb5, 0x63, 0x34, 0x7d, 0x0b, 0xff, 0xd7, 0x62, 0xaf, 0xad, 0xfd, 0xa4, 0x79, 0xb0, + 0x55, 0x43, 0x58, 0x87, 0xb8, 0xfe, 0x9e, 0x68, 0x42, 0xed, 0x7f, 0x26, 0xd0, 0x5b, 0x63, 0xfc, + 0x41, 0x86, 0x73, 0x80, 0xba, 0xb3, 0x92, 0x22, 0xfc, 0x97, 0x37, 0xae, 0x10, 0x6b, 0x4c, 0xf0, + 0x1f, 0x81, 0xbb, 0xfa, 0x1b, 0x50, 0xd8, 0xce, 0xe2, 0xf7, 0x95, 0x99, 0x30, 0x0e, 0x3e, 0x11, + 0xe8, 0x5c, 0x4c, 0xce, 0x4b, 0xdf, 0x9e, 0x40, 0x57, 0x9b, 0x6c, 0xc3, 0xb7, 0x3e, 0x7e, 0x2e, + 0x2c, 0xa5, 0x0e, 0xac, 0x79, 0xb5, 0xc9, 0x2a, 0xf3, 0x9e, 0x41, 0x6f, 0xad, 0xb8, 0x61, 0x51, + 0xb2, 0x69, 0xd1, 0x35, 0x7f, 0xb7, 0x36, 0xfc, 0x7d, 0xe5, 0xe0, 0x87, 0x70, 0xf8, 0x2b, 0x00, + 0x00, 0xff, 0xff, 0xc4, 0xdc, 0x1a, 0x5c, 0x14, 0x05, 0x00, 0x00, } diff --git a/plugins/vpp/model/l3/l3.proto b/plugins/vpp/model/l3/l3.proto index f8e17a15f1..0662b27055 100644 --- a/plugins/vpp/model/l3/l3.proto +++ b/plugins/vpp/model/l3/l3.proto @@ -5,19 +5,26 @@ package l3; /* Static routes */ message StaticRoutes { message Route { - uint32 vrf_id = 1; /* VRF identifier, field required for remote client. This value should be + enum RouteType { + INTRA_VRF = 0; /* Forwarding is being done in the specified vrf_id only, or according to + the specified outgoing interface. */ + INTER_VRF = 1; /* Forwarding is being done by lookup into a different VRF, + specified as via_vrf_id field. In case of these routes, the outgoing + interface should not be specified. The next hop IP address + does not have to be specified either, in that case VPP does full + recursive lookup in the via_vrf_id VRF. */ + } + RouteType type = 1; + uint32 vrf_id = 2; /* VRF identifier, field required for remote client. This value should be consistent with VRF ID in static route key. If it is not, value from key will be preffered and this field will be overriden. */ - string description = 2; /* optional description */ - string dst_ip_addr = 3; /* ip address + prefix in format
<address>/<prefix> */
-        string next_hop_addr = 4;           /* next hop address */
-        uint32 next_hop_vrf_id = 5;         /* Next hop VRF ID. Should be equal to the vrf_id unless inter-VRF
-                                               routing is needed. */
-        string outgoing_interface = 6;      /* outgoing interface name */
-        uint32 lookup_vrf_id = 7;           /* Do a lookup in the specified VRF. In effect only if neither the next_hop_addr
-                                               nor the outgoing_interface is set. */
-        uint32 weight = 8;                  /* weight (used for unequal cost load balancing) */
-        uint32 preference = 9;              /* The preference of the path. Lowest preference is preferred. */
+        string description = 3;             /* optional description */
+        string dst_ip_addr = 4;             /* ip address + prefix in format <address>/<prefix> */
+        string next_hop_addr = 5;           /* next hop address */
+        uint32 via_vrf_id = 6;              /* Specifies VRF ID for the next hop lookup / recursive lookup */
+        string outgoing_interface = 7;      /* outgoing interface name */
+        uint32 weight = 9;                  /* weight (used for unequal cost load balancing) */
+        uint32 preference = 10;             /* The preference of the path. Lowest preference is preferred. */
                                             /* Only paths with the best preference contribute to forwarding. */
                                             /* (a poor man's primary and backup) */
     }

From f0ab49eaafc2afb27de7a2025068c686082e0064 Mon Sep 17 00:00:00 2001
From: Ondrej Fabry
Date: Thu, 19 Jul 2018 10:50:02 +0200
Subject: [PATCH 018/174] Include mc and nano in dev image

Signed-off-by: Ondrej Fabry

---
 docker/dev/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/dev/Dockerfile b/docker/dev/Dockerfile
index 89645c0b3e..082c6ee961 100644
--- a/docker/dev/Dockerfile
+++ b/docker/dev/Dockerfile
@@ -35,7 +35,7 @@ RUN apt-get update \
     autoconf automake build-essential ca-certificates curl gdb git \
     inetutils-traceroute iproute2 ipsec-tools iputils-ping \
     libapr1 libmbedcrypto0 libmbedtls10 libmbedx509-0 libtool \
-    make netcat python software-properties-common sudo supervisor \
+    make mc nano netcat python software-properties-common sudo supervisor \
     telnet unzip wget \
 && rm -rf /var/lib/apt/lists/*

From 59c62c6808e3d9a215bb88787958d12d15e506a8 Mon Sep 17 00:00:00 2001
From: Vladimir Lavor
Date: Thu, 19 Jul 2018 10:54:02 +0200
Subject: [PATCH 019/174] retry binary api request if vpp disconnects

Signed-off-by: Vladimir Lavor

---
 plugins/govppmux/govpp_channel.go        | 122 +++++++++++++++++++++
 plugins/govppmux/plugin_impl_govppmux.go |  42 +++++---
 2 files changed, 151 insertions(+), 13 deletions(-)
 create mode 100644 plugins/govppmux/govpp_channel.go

diff --git a/plugins/govppmux/govpp_channel.go b/plugins/govppmux/govpp_channel.go
new file mode 100644
index 0000000000..a5b92ce943
--- /dev/null
+++ b/plugins/govppmux/govpp_channel.go
@@ -0,0 +1,122 @@
+package govppmux
+
+import (
+	"time"
+
+	govppapi "git.fd.io/govpp.git/api"
+	"git.fd.io/govpp.git/core"
+	"github.com/ligato/cn-infra/logging/logrus"
+)
+
+const defaultRetryRequestTimeout = 500 * time.Millisecond
+
+// goVppChan implements govpp channel interface. Instance is returned by NewAPIChannel() or NewAPIChannelBuffered(),
+// and contains *govpp.channel dynamic type (vppChan field). Implemented methods allow custom handling of low-level
+// govpp.
+type goVppChan struct {
+	vppChan govppapi.Channel
+	// Retry data
+	retry *retryConfig
+}
+
+// govppRequestCtx is custom govpp RequestCtx.
+type govppRequestCtx struct {
+	// Original request context
+	requestCtx govppapi.RequestCtx
+	// Function allowing to re-send request in case it's granted by the config file
+	sendRequest func(govppapi.Message) govppapi.RequestCtx
+	// Parameter for sendRequest
+	requestMsg govppapi.Message
+	// Retry data
+	retry *retryConfig
+}
+
+// helper struct holding info about retry configuration
+type retryConfig struct {
+	attempts int
+	timeout  time.Duration
+}
+
+// ReceiveReply handles request and returns error if occurred. Also does retry if this option is available.
+func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error { + var err error + // Receive reply from original send + if err = r.requestCtx.ReceiveReply(reply); err != nil && err == core.ErrNotConnected { + if r.retry != nil && r.retry.attempts > 0 { + // Set default timeout between retries if not set + if r.retry.timeout == 0 { + r.retry.timeout = defaultRetryRequestTimeout + } + // Try to re-sent requests + for i := 1; i <= r.retry.attempts; i++ { + logrus.DefaultLogger().Warnf("Retrying message %v: %d", r.requestMsg.GetMessageName(), i) + ctx := r.sendRequest(r.requestMsg) + if err = ctx.ReceiveReply(reply); err == nil { + return nil + } + time.Sleep(r.retry.timeout) + } + } + } + + return err +} + +// SendRequest sends asynchronous request to the vpp and receives context used to receive reply. +// Plugin govppmux allows to re-send retry which failed because of disconnected vpp, if enabled. +func (c *goVppChan) SendRequest(request govppapi.Message) govppapi.RequestCtx { + sendRequest := c.vppChan.SendRequest + // Send request now and wait for context + requestCtx := sendRequest(request) + + // Return context with value and function which allows to send request again if needed + return &govppRequestCtx{requestCtx, sendRequest, request, c.retry} +} + +func (c *goVppChan) SendMultiRequest(request govppapi.Message) govppapi.MultiRequestCtx { + return c.vppChan.SendMultiRequest(request) +} + +func (c *goVppChan) SubscribeNotification(notifChan chan govppapi.Message, msgFactory func() govppapi.Message) (*govppapi.NotifSubscription, error) { + return c.vppChan.SubscribeNotification(notifChan, msgFactory) +} + +func (c *goVppChan) UnsubscribeNotification(subscription *govppapi.NotifSubscription) error { + return c.vppChan.UnsubscribeNotification(subscription) +} + +func (c *goVppChan) CheckMessageCompatibility(messages ...govppapi.Message) error { + return c.vppChan.CheckMessageCompatibility(messages...) +} + +func (c *goVppChan) SetReplyTimeout(timeout time.Duration) { + c.vppChan.SetReplyTimeout(timeout) +} + +func (c *goVppChan) GetRequestChannel() chan<- *govppapi.VppRequest { + return c.vppChan.GetRequestChannel() +} + +func (c *goVppChan) GetReplyChannel() <-chan *govppapi.VppReply { + return c.vppChan.GetReplyChannel() +} + +func (c *goVppChan) GetNotificationChannel() chan<- *govppapi.NotifSubscribeRequest { + return c.vppChan.GetNotificationChannel() +} + +func (c *goVppChan) GetNotificationReplyChannel() <-chan error { + return c.vppChan.GetNotificationReplyChannel() +} + +func (c *goVppChan) GetMessageDecoder() govppapi.MessageDecoder { + return c.vppChan.GetMessageDecoder() +} + +func (c *goVppChan) GetID() uint16 { + return c.vppChan.GetID() +} + +func (c *goVppChan) Close() { + c.vppChan.Close() +} diff --git a/plugins/govppmux/plugin_impl_govppmux.go b/plugins/govppmux/plugin_impl_govppmux.go index ce8518023b..9beecab471 100644 --- a/plugins/govppmux/plugin_impl_govppmux.go +++ b/plugins/govppmux/plugin_impl_govppmux.go @@ -49,6 +49,8 @@ type GOVPPPlugin struct { reconnectResync bool lastConnErr error + config *Config + // Cancel can be used to cancel all goroutines and their jobs inside of the plugin. cancel context.CancelFunc // Wait group allows to wait until all goroutines of the plugin have finished. @@ -72,10 +74,14 @@ type Config struct { // shared memory segments are created directly in the SHM directory /dev/shm. 
ShmPrefix string `json:"shm-prefix"` ReconnectResync bool `json:"resync-after-reconnect"` + // How many times can be request resent in case vpp is suddenly disconnected. + RetryRequestCount int `json:"retry-request-count"` + // Time between request resend attempts. Default is 500ms. + RetryRequestTimeout time.Duration `json:"retry-request-timeout"` } -func defaultConfig() Config { - return Config{ +func defaultConfig() *Config { + return &Config{ HealthCheckProbeInterval: time.Second, HealthCheckReplyTimeout: 100 * time.Millisecond, HealthCheckThreshold: 1, @@ -103,20 +109,20 @@ func (plugin *GOVPPPlugin) Init() error { plugin.PluginName = plugin.Deps.PluginName - cfg := defaultConfig() - found, err := plugin.PluginConfig.GetValue(&cfg) + plugin.config = defaultConfig() + found, err := plugin.PluginConfig.GetValue(plugin.config) if err != nil { return err } var shmPrefix string if found { - govpp.SetHealthCheckProbeInterval(cfg.HealthCheckProbeInterval) - govpp.SetHealthCheckReplyTimeout(cfg.HealthCheckReplyTimeout) - govpp.SetHealthCheckThreshold(cfg.HealthCheckThreshold) - plugin.replyTimeout = cfg.ReplyTimeout - plugin.reconnectResync = cfg.ReconnectResync - shmPrefix = cfg.ShmPrefix - plugin.Log.Debug("Setting govpp parameters", cfg) + govpp.SetHealthCheckProbeInterval(plugin.config.HealthCheckProbeInterval) + govpp.SetHealthCheckReplyTimeout(plugin.config.HealthCheckReplyTimeout) + govpp.SetHealthCheckThreshold(plugin.config.HealthCheckThreshold) + plugin.replyTimeout = plugin.config.ReplyTimeout + plugin.reconnectResync = plugin.config.ReconnectResync + shmPrefix = plugin.config.ShmPrefix + plugin.Log.Debug("Setting govpp parameters", plugin.config) } if plugin.vppAdapter == nil { @@ -181,7 +187,12 @@ func (plugin *GOVPPPlugin) NewAPIChannel() (govppapi.Channel, error) { if plugin.replyTimeout > 0 { ch.SetReplyTimeout(plugin.replyTimeout) } - return ch, nil + retryCfg := &retryConfig{} + if plugin.config != nil { + retryCfg.attempts = plugin.config.RetryRequestCount + retryCfg.timeout = plugin.config.RetryRequestTimeout + } + return &goVppChan{ch, retryCfg}, nil } // NewAPIChannelBuffered returns a new API channel for communication with VPP via govpp core. @@ -198,7 +209,12 @@ func (plugin *GOVPPPlugin) NewAPIChannelBuffered(reqChanBufSize, replyChanBufSiz if plugin.replyTimeout > 0 { ch.SetReplyTimeout(plugin.replyTimeout) } - return ch, nil + retryCfg := &retryConfig{} + if plugin.config != nil { + retryCfg.attempts = plugin.config.RetryRequestCount + retryCfg.timeout = plugin.config.RetryRequestTimeout + } + return &goVppChan{ch, retryCfg}, nil } // handleVPPConnectionEvents handles VPP connection events. From 8539383c57089cc47b68cf109eff733fdde7773d Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 19 Jul 2018 13:22:48 +0200 Subject: [PATCH 020/174] added conf.file description + address comments Signed-off-by: Vladimir Lavor --- plugins/govppmux/govpp.conf | 8 +++ plugins/govppmux/govpp_channel.go | 89 ++++++------------------ plugins/govppmux/plugin_impl_govppmux.go | 5 +- 3 files changed, 32 insertions(+), 70 deletions(-) diff --git a/plugins/govppmux/govpp.conf b/plugins/govppmux/govpp.conf index 28ff5458b6..f0fece8b53 100644 --- a/plugins/govppmux/govpp.conf +++ b/plugins/govppmux/govpp.conf @@ -4,3 +4,11 @@ shm-prefix: # If VPP lost connection, this flag allows to automatically run the whole resync procedure # for all registered plugins after reconnection. resync-after-reconnect: false + +# Requests failed because of the temporary VPP disconnect can be re-tried. 
Field defines number of +# retry attempts. Default is zero, meaning the feature is disabled. +retry-request-count: 0 + +# Defines timeout between retry attempts. Default value is 500ms. If retry-request-count is set to zero, +# the field has no effect. +retry-request-timeout: 500000000 diff --git a/plugins/govppmux/govpp_channel.go b/plugins/govppmux/govpp_channel.go index a5b92ce943..5c96500587 100644 --- a/plugins/govppmux/govpp_channel.go +++ b/plugins/govppmux/govpp_channel.go @@ -8,15 +8,13 @@ import ( "github.com/ligato/cn-infra/logging/logrus" ) -const defaultRetryRequestTimeout = 500 * time.Millisecond - // goVppChan implements govpp channel interface. Instance is returned by NewAPIChannel() or NewAPIChannelBuffered(), // and contains *govpp.channel dynamic type (vppChan field). Implemented methods allow custom handling of low-level // govpp. type goVppChan struct { - vppChan govppapi.Channel + govppapi.Channel // Retry data - retry *retryConfig + retry retryConfig } // govppRequestCtx is custom govpp RequestCtx. @@ -28,7 +26,7 @@ type govppRequestCtx struct { // Parameter for sendRequest requestMsg govppapi.Message // Retry data - retry *retryConfig + retry retryConfig } // helper struct holding info about retry configuration @@ -39,22 +37,25 @@ type retryConfig struct { // ReceiveReply handles request and returns error if occurred. Also does retry if this option is available. func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error { + var timeout time.Duration + maxAttempts := r.retry.attempts + if r.retry.timeout > 0 { // Default value is 500ms + timeout = r.retry.timeout + } + var err error // Receive reply from original send - if err = r.requestCtx.ReceiveReply(reply); err != nil && err == core.ErrNotConnected { - if r.retry != nil && r.retry.attempts > 0 { - // Set default timeout between retries if not set - if r.retry.timeout == 0 { - r.retry.timeout = defaultRetryRequestTimeout - } - // Try to re-sent requests - for i := 1; i <= r.retry.attempts; i++ { - logrus.DefaultLogger().Warnf("Retrying message %v: %d", r.requestMsg.GetMessageName(), i) - ctx := r.sendRequest(r.requestMsg) - if err = ctx.ReceiveReply(reply); err == nil { - return nil - } - time.Sleep(r.retry.timeout) + if err = r.requestCtx.ReceiveReply(reply); err != nil && err == core.ErrNotConnected && maxAttempts > 0 { + // Try to re-sent requests + for attemptIdx := 1; attemptIdx <= maxAttempts; attemptIdx++ { + logrus.DefaultLogger().Warnf("Retrying message %v, attempt: %d", r.requestMsg.GetMessageName(), attemptIdx) + if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err != nil && err == core.ErrNotConnected && attemptIdx != maxAttempts { + time.Sleep(timeout) + continue + } else if err != nil { + return err + } else { + return nil } } } @@ -65,58 +66,10 @@ func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error { // SendRequest sends asynchronous request to the vpp and receives context used to receive reply. // Plugin govppmux allows to re-send retry which failed because of disconnected vpp, if enabled. 
func (c *goVppChan) SendRequest(request govppapi.Message) govppapi.RequestCtx { - sendRequest := c.vppChan.SendRequest + sendRequest := c.Channel.SendRequest // Send request now and wait for context requestCtx := sendRequest(request) // Return context with value and function which allows to send request again if needed return &govppRequestCtx{requestCtx, sendRequest, request, c.retry} } - -func (c *goVppChan) SendMultiRequest(request govppapi.Message) govppapi.MultiRequestCtx { - return c.vppChan.SendMultiRequest(request) -} - -func (c *goVppChan) SubscribeNotification(notifChan chan govppapi.Message, msgFactory func() govppapi.Message) (*govppapi.NotifSubscription, error) { - return c.vppChan.SubscribeNotification(notifChan, msgFactory) -} - -func (c *goVppChan) UnsubscribeNotification(subscription *govppapi.NotifSubscription) error { - return c.vppChan.UnsubscribeNotification(subscription) -} - -func (c *goVppChan) CheckMessageCompatibility(messages ...govppapi.Message) error { - return c.vppChan.CheckMessageCompatibility(messages...) -} - -func (c *goVppChan) SetReplyTimeout(timeout time.Duration) { - c.vppChan.SetReplyTimeout(timeout) -} - -func (c *goVppChan) GetRequestChannel() chan<- *govppapi.VppRequest { - return c.vppChan.GetRequestChannel() -} - -func (c *goVppChan) GetReplyChannel() <-chan *govppapi.VppReply { - return c.vppChan.GetReplyChannel() -} - -func (c *goVppChan) GetNotificationChannel() chan<- *govppapi.NotifSubscribeRequest { - return c.vppChan.GetNotificationChannel() -} - -func (c *goVppChan) GetNotificationReplyChannel() <-chan error { - return c.vppChan.GetNotificationReplyChannel() -} - -func (c *goVppChan) GetMessageDecoder() govppapi.MessageDecoder { - return c.vppChan.GetMessageDecoder() -} - -func (c *goVppChan) GetID() uint16 { - return c.vppChan.GetID() -} - -func (c *goVppChan) Close() { - c.vppChan.Close() -} diff --git a/plugins/govppmux/plugin_impl_govppmux.go b/plugins/govppmux/plugin_impl_govppmux.go index 9beecab471..a66be391e1 100644 --- a/plugins/govppmux/plugin_impl_govppmux.go +++ b/plugins/govppmux/plugin_impl_govppmux.go @@ -86,6 +86,7 @@ func defaultConfig() *Config { HealthCheckReplyTimeout: 100 * time.Millisecond, HealthCheckThreshold: 1, ReplyTimeout: time.Second, + RetryRequestTimeout: 500 * time.Millisecond, } } @@ -187,7 +188,7 @@ func (plugin *GOVPPPlugin) NewAPIChannel() (govppapi.Channel, error) { if plugin.replyTimeout > 0 { ch.SetReplyTimeout(plugin.replyTimeout) } - retryCfg := &retryConfig{} + retryCfg := retryConfig{} if plugin.config != nil { retryCfg.attempts = plugin.config.RetryRequestCount retryCfg.timeout = plugin.config.RetryRequestTimeout @@ -209,7 +210,7 @@ func (plugin *GOVPPPlugin) NewAPIChannelBuffered(reqChanBufSize, replyChanBufSiz if plugin.replyTimeout > 0 { ch.SetReplyTimeout(plugin.replyTimeout) } - retryCfg := &retryConfig{} + retryCfg := retryConfig{} if plugin.config != nil { retryCfg.attempts = plugin.config.RetryRequestCount retryCfg.timeout = plugin.config.RetryRequestTimeout From 2bd91005dc00ebeeb5f40c0e7faa3b6fa8cc278e Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 19 Jul 2018 13:58:50 +0200 Subject: [PATCH 021/174] split interface, remove unnecessary receivers and fix logs Signed-off-by: Vladimir Lavor --- .../vpp/aclplugin/vppcalls/acl_vppcalls.go | 116 +++++++++--------- .../vpp/aclplugin/vppcalls/api_vppcalls.go | 26 ++-- .../aclplugin/vppcalls/interfaces_vppcalls.go | 8 +- 3 files changed, 79 insertions(+), 71 deletions(-) diff --git a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go 
b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go index 0b247edb2d..8e04611929 100644 --- a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls.go @@ -22,43 +22,43 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" + aclapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) // AclMessages is list of used VPP messages for compatibility check var AclMessages = []govppapi.Message{ - &acl_api.ACLAddReplace{}, - &acl_api.ACLAddReplaceReply{}, - &acl_api.ACLDel{}, - &acl_api.ACLDelReply{}, - &acl_api.MacipACLAdd{}, - &acl_api.MacipACLAddReply{}, - &acl_api.MacipACLAddReplace{}, - &acl_api.MacipACLAddReplaceReply{}, - &acl_api.MacipACLDel{}, - &acl_api.MacipACLDelReply{}, - &acl_api.ACLDump{}, - &acl_api.ACLDetails{}, - &acl_api.MacipACLDump{}, - &acl_api.MacipACLDetails{}, - &acl_api.ACLInterfaceListDump{}, - &acl_api.ACLInterfaceListDetails{}, - &acl_api.MacipACLInterfaceListDump{}, - &acl_api.MacipACLInterfaceListDetails{}, - &acl_api.ACLInterfaceSetACLList{}, - &acl_api.ACLInterfaceSetACLListReply{}, - &acl_api.MacipACLInterfaceAddDel{}, - &acl_api.MacipACLInterfaceAddDelReply{}, + &aclapi.ACLAddReplace{}, + &aclapi.ACLAddReplaceReply{}, + &aclapi.ACLDel{}, + &aclapi.ACLDelReply{}, + &aclapi.MacipACLAdd{}, + &aclapi.MacipACLAddReply{}, + &aclapi.MacipACLAddReplace{}, + &aclapi.MacipACLAddReplaceReply{}, + &aclapi.MacipACLDel{}, + &aclapi.MacipACLDelReply{}, + &aclapi.ACLDump{}, + &aclapi.ACLDetails{}, + &aclapi.MacipACLDump{}, + &aclapi.MacipACLDetails{}, + &aclapi.ACLInterfaceListDump{}, + &aclapi.ACLInterfaceListDetails{}, + &aclapi.MacipACLInterfaceListDump{}, + &aclapi.MacipACLInterfaceListDetails{}, + &aclapi.ACLInterfaceSetACLList{}, + &aclapi.ACLInterfaceSetACLListReply{}, + &aclapi.MacipACLInterfaceAddDel{}, + &aclapi.MacipACLInterfaceAddDelReply{}, } func (handler *aclVppHandler) GetAclPluginVersion() (string, error) { defer func(t time.Time) { - handler.stopwatch.TimeLog(acl_api.ACLPluginGetVersion{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(aclapi.ACLPluginGetVersion{}).LogTimeEntry(time.Since(t)) }(time.Now()) - req := &acl_api.ACLPluginGetVersion{} - reply := &acl_api.ACLPluginGetVersionReply{} + req := &aclapi.ACLPluginGetVersion{} + reply := &aclapi.ACLPluginGetVersionReply{} // Does not return retval if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return "", fmt.Errorf("failed to get VPP ACL plugin version: %v", err) @@ -69,11 +69,11 @@ func (handler *aclVppHandler) GetAclPluginVersion() (string, error) { func (handler *aclVppHandler) AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error) { defer func(t time.Time) { - handler.stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(aclapi.ACLAddReplace{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare Ip rules - aclIPRules, err := handler.transformACLIpRules(rules) + aclIPRules, err := transformACLIpRules(rules) if err != nil { return 0, err } @@ -81,14 +81,14 @@ func (handler *aclVppHandler) AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclNam return 0, fmt.Errorf("no rules found for ACL %v", aclName) } - req := &acl_api.ACLAddReplace{ + req := &aclapi.ACLAddReplace{ ACLIndex: 0xffffffff, // to make new Entry Count: uint32(len(aclIPRules)), Tag: []byte(aclName), R: aclIPRules, } - reply := &acl_api.ACLAddReplaceReply{} + reply := 
&aclapi.ACLAddReplaceReply{} if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, fmt.Errorf("failed to write ACL %v: %v", aclName, err) } @@ -101,7 +101,7 @@ func (handler *aclVppHandler) AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclNam func (handler *aclVppHandler) AddMacIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error) { defer func(t time.Time) { - handler.stopwatch.TimeLog(acl_api.MacipACLAdd{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(aclapi.MacipACLAdd{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare MAc Ip rules @@ -113,13 +113,13 @@ func (handler *aclVppHandler) AddMacIPAcl(rules []*acl.AccessLists_Acl_Rule, acl return 0, fmt.Errorf("no rules found for ACL %v", aclName) } - req := &acl_api.MacipACLAdd{ + req := &aclapi.MacipACLAdd{ Count: uint32(len(aclMacIPRules)), Tag: []byte(aclName), R: aclMacIPRules, } - reply := &acl_api.MacipACLAddReply{} + reply := &aclapi.MacipACLAddReply{} if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, fmt.Errorf("failed to write ACL %v: %v", aclName, err) } @@ -132,11 +132,11 @@ func (handler *aclVppHandler) AddMacIPAcl(rules []*acl.AccessLists_Acl_Rule, acl func (handler *aclVppHandler) ModifyIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string) error { defer func(t time.Time) { - handler.stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(aclapi.ACLAddReplace{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare Ip rules - aclIPRules, err := handler.transformACLIpRules(rules) + aclIPRules, err := transformACLIpRules(rules) if err != nil { return err } @@ -144,14 +144,14 @@ func (handler *aclVppHandler) ModifyIPAcl(aclIndex uint32, rules []*acl.AccessLi return nil } - req := &acl_api.ACLAddReplace{ + req := &aclapi.ACLAddReplace{ ACLIndex: aclIndex, Count: uint32(len(aclIPRules)), Tag: []byte(aclName), R: aclIPRules, } - reply := &acl_api.ACLAddReplaceReply{} + reply := &aclapi.ACLAddReplaceReply{} if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to write ACL %v: %v", aclName, err) } @@ -165,7 +165,7 @@ func (handler *aclVppHandler) ModifyIPAcl(aclIndex uint32, rules []*acl.AccessLi func (handler *aclVppHandler) ModifyMACIPAcl(aclIndex uint32, rules []*acl.AccessLists_Acl_Rule, aclName string) error { defer func(t time.Time) { - handler.stopwatch.TimeLog(acl_api.ACLAddReplace{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(aclapi.ACLAddReplace{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare MAc Ip rules @@ -177,14 +177,14 @@ func (handler *aclVppHandler) ModifyMACIPAcl(aclIndex uint32, rules []*acl.Acces return fmt.Errorf("no rules found for ACL %v", aclName) } - req := &acl_api.MacipACLAddReplace{ + req := &aclapi.MacipACLAddReplace{ ACLIndex: aclIndex, Count: uint32(len(aclMacIPRules)), Tag: []byte(aclName), R: aclMacIPRules, } - reply := &acl_api.MacipACLAddReplaceReply{} + reply := &aclapi.MacipACLAddReplaceReply{} if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to write ACL %v: %v", aclName, err) } @@ -197,14 +197,14 @@ func (handler *aclVppHandler) ModifyMACIPAcl(aclIndex uint32, rules []*acl.Acces func (handler *aclVppHandler) DeleteIPAcl(aclIndex uint32) error { defer func(t time.Time) { - handler.stopwatch.TimeLog(acl_api.ACLDel{}).LogTimeEntry(time.Since(t)) + 
handler.stopwatch.TimeLog(aclapi.ACLDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) - msg := &acl_api.ACLDel{ + msg := &aclapi.ACLDel{ ACLIndex: aclIndex, } - reply := &acl_api.ACLDelReply{} + reply := &aclapi.ACLDelReply{} if err := handler.callsChannel.SendRequest(msg).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to remove L3/L4 ACL %v: %v", aclIndex, err) } @@ -217,14 +217,14 @@ func (handler *aclVppHandler) DeleteIPAcl(aclIndex uint32) error { func (handler *aclVppHandler) DeleteMacIPAcl(aclIndex uint32) error { defer func(t time.Time) { - handler.stopwatch.TimeLog(acl_api.MacipACLDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(aclapi.MacipACLDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) - msg := &acl_api.MacipACLDel{ + msg := &aclapi.MacipACLDel{ ACLIndex: aclIndex, } - reply := &acl_api.MacipACLDelReply{} + reply := &aclapi.MacipACLDelReply{} if err := handler.callsChannel.SendRequest(msg).ReceiveReply(reply); err != nil { return fmt.Errorf("failed to remove L2 ACL %v: %v", aclIndex, err) } @@ -236,9 +236,9 @@ func (handler *aclVppHandler) DeleteMacIPAcl(aclIndex uint32) error { } // Method transforms provided set of IP proto ACL rules to binapi ACL rules. -func (handler *aclVppHandler) transformACLIpRules(rules []*acl.AccessLists_Acl_Rule) (aclIPRules []acl_api.ACLRule, err error) { +func transformACLIpRules(rules []*acl.AccessLists_Acl_Rule) (aclIPRules []aclapi.ACLRule, err error) { for _, rule := range rules { - aclRule := &acl_api.ACLRule{ + aclRule := &aclapi.ACLRule{ IsPermit: uint8(rule.AclAction), } // Match @@ -246,18 +246,18 @@ func (handler *aclVppHandler) transformACLIpRules(rules []*acl.AccessLists_Acl_R // Concerned to IP rules only // L3 if ipRule.Ip != nil { - aclRule, err = handler.ipACL(ipRule.Ip, aclRule) + aclRule, err = ipACL(ipRule.Ip, aclRule) if err != nil { return nil, err } } // ICMP/L4 if ipRule.Icmp != nil { - aclRule = handler.icmpACL(ipRule.Icmp, aclRule) + aclRule = icmpACL(ipRule.Icmp, aclRule) } else if ipRule.Tcp != nil { - aclRule = handler.tcpACL(ipRule.Tcp, aclRule) + aclRule = tcpACL(ipRule.Tcp, aclRule) } else if ipRule.Udp != nil { - aclRule = handler.udpACL(ipRule.Udp, aclRule) + aclRule = udpACL(ipRule.Udp, aclRule) } aclIPRules = append(aclIPRules, *aclRule) } @@ -265,9 +265,9 @@ func (handler *aclVppHandler) transformACLIpRules(rules []*acl.AccessLists_Acl_R return aclIPRules, nil } -func (handler *aclVppHandler) transformACLMacIPRules(rules []*acl.AccessLists_Acl_Rule) (aclMacIPRules []acl_api.MacipACLRule, err error) { +func (handler *aclVppHandler) transformACLMacIPRules(rules []*acl.AccessLists_Acl_Rule) (aclMacIPRules []aclapi.MacipACLRule, err error) { for _, rule := range rules { - aclMacIPRule := &acl_api.MacipACLRule{ + aclMacIPRule := &aclapi.MacipACLRule{ IsPermit: uint8(rule.AclAction), } // Matche @@ -305,7 +305,7 @@ func (handler *aclVppHandler) transformACLMacIPRules(rules []*acl.AccessLists_Ac // The function sets an IP ACL rule fields into provided ACL Rule object. Source // and destination addresses have to be the same IP version and contain a network mask. 
-func (handler *aclVppHandler) ipACL(ipRule *acl.AccessLists_Acl_Rule_Match_IpRule_Ip, aclRule *acl_api.ACLRule) (*acl_api.ACLRule, error) { +func ipACL(ipRule *acl.AccessLists_Acl_Rule_Match_IpRule_Ip, aclRule *aclapi.ACLRule) (*aclapi.ACLRule, error) { var ( err error srcIP net.IP @@ -372,7 +372,7 @@ func (handler *aclVppHandler) ipACL(ipRule *acl.AccessLists_Acl_Rule_Match_IpRul // The function sets an ICMP ACL rule fields into provided ACL Rule object. // The ranges are exclusive, use first = 0 and last = 255/65535 (icmpv4/icmpv6) to match "any". -func (handler *aclVppHandler) icmpACL(icmpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { +func icmpACL(icmpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Icmp, aclRule *aclapi.ACLRule) *aclapi.ACLRule { if icmpRule == nil { return aclRule } @@ -399,7 +399,7 @@ func (handler *aclVppHandler) icmpACL(icmpRule *acl.AccessLists_Acl_Rule_Match_I } // Sets an TCP ACL rule fields into provided ACL Rule object. -func (handler *aclVppHandler) tcpACL(tcpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Tcp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { +func tcpACL(tcpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Tcp, aclRule *aclapi.ACLRule) *aclapi.ACLRule { aclRule.Proto = TCPProto // IANA TCP aclRule.SrcportOrIcmptypeFirst = uint16(tcpRule.SourcePortRange.LowerPort) aclRule.SrcportOrIcmptypeLast = uint16(tcpRule.SourcePortRange.UpperPort) @@ -411,7 +411,7 @@ func (handler *aclVppHandler) tcpACL(tcpRule *acl.AccessLists_Acl_Rule_Match_IpR } // Sets an UDP ACL rule fields into provided ACL Rule object. -func (handler *aclVppHandler) udpACL(udpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Udp, aclRule *acl_api.ACLRule) *acl_api.ACLRule { +func udpACL(udpRule *acl.AccessLists_Acl_Rule_Match_IpRule_Udp, aclRule *aclapi.ACLRule) *aclapi.ACLRule { aclRule.Proto = UDPProto // IANA UDP aclRule.SrcportOrIcmptypeFirst = uint16(udpRule.SourcePortRange.LowerPort) aclRule.SrcportOrIcmptypeLast = uint16(udpRule.SourcePortRange.UpperPort) diff --git a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go index 33ab5c15b6..e137c03f1c 100644 --- a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go @@ -17,15 +17,18 @@ package vppcalls import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging/measure" - acl_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" + aclapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) -// AclVppAPI provides methods required to handle VPP access lists +// AclVppAPI provides read/write methods required to handle VPP access lists type AclVppAPI interface { - // GetAclPluginVersion returns version of the VPP ACL plugin - GetAclPluginVersion() (string, error) + AclVppWrite + AclVppRead +} + +type AclVppWrite interface { // AddIPAcl create new L3/4 ACL. Input index == 0xffffffff, VPP provides index in reply. AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error) // AddMacIPAcl creates new L2 MAC IP ACL. VPP provides index in reply. @@ -50,6 +53,11 @@ type AclVppAPI interface { SetMacIPAclToInterface(aclIndex uint32, ifIndices []uint32) error // RemoveMacIPIngressACLFromInterfaces removes L2 ACL from interfaces. 
RemoveMacIPIngressACLFromInterfaces(removedACLIndex uint32, ifIndices []uint32) error
+}
+
+type AclVppRead interface {
+	// GetAclPluginVersion returns version of the VPP ACL plugin
+	GetAclPluginVersion() (string, error)
 	// DumpIPACL returns all IP-type ACLs
 	DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error)
 	// DumpMACIPACL returns all MACIP-type ACLs
@@ -59,19 +67,19 @@
 	// DumpMACIPACLInterfaces returns a map of MACIP ACL indices with interfaces
 	DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex) (map[uint32]*acl.AccessLists_Acl_Interfaces, error)
 	// DumpIPAcls returns a list of all configured ACLs with IP-type ruleData.
-	DumpIPAcls() (map[ACLIdentifier][]acl_api.ACLRule, error)
+	DumpIPAcls() (map[ACLIdentifier][]aclapi.ACLRule, error)
 	// DumpMacIPAcls returns a list of all configured ACLs with MACIP-type ruleData.
-	DumpMacIPAcls() (map[ACLIdentifier][]acl_api.MacipACLRule, error)
+	DumpMacIPAcls() (map[ACLIdentifier][]aclapi.MacipACLRule, error)
 	// DumpInterfaceIPAcls finds interface in VPP and returns its ACL configuration
 	DumpInterfaceIPAcls(swIndex uint32) (acl.AccessLists, error)
 	// DumpInterfaceMACIPAcls finds interface in VPP and returns its MACIP ACL configuration
 	DumpInterfaceMACIPAcls(swIndex uint32) (acl.AccessLists, error)
 	// DumpInterfaceIPACLs finds interface in VPP and returns its IP ACL configuration.
-	DumpInterfaceIPACLs(swIndex uint32) (*acl_api.ACLInterfaceListDetails, error)
+	DumpInterfaceIPACLs(swIndex uint32) (*aclapi.ACLInterfaceListDetails, error)
 	// DumpInterfaceMACIPACLs finds interface in VPP and returns its MACIP ACL configuration.
-	DumpInterfaceMACIPACLs(swIndex uint32) (*acl_api.MacipACLInterfaceListDetails, error)
+	DumpInterfaceMACIPACLs(swIndex uint32) (*aclapi.MacipACLInterfaceListDetails, error)
 	// DumpInterfaces finds all interfaces in VPP and returns their ACL configurations
-	DumpInterfaces() ([]*acl_api.ACLInterfaceListDetails, []*acl_api.MacipACLInterfaceListDetails, error)
+	DumpInterfaces() ([]*aclapi.ACLInterfaceListDetails, []*aclapi.MacipACLInterfaceListDetails, error)
 }

 // aclVppHandler is accessor for acl-related vppcalls methods
diff --git a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go
index ae47c9710e..437b1dc44d 100644
--- a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go
+++ b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls.go
@@ -77,10 +77,10 @@ func (handler *aclVppHandler) SetMacIPAclToInterface(aclIndex uint32, ifIndices

 	err := handler.callsChannel.SendRequest(req).ReceiveReply(reply)
 	if err != nil {
-		return fmt.Errorf("failed to set interface %v to L2 ACL %v", ingressIfIdx, aclIndex)
+		return fmt.Errorf("failed to set interface %d to L2 ACL %d: %v", ingressIfIdx, aclIndex, err)
 	}
 	if reply.Retval != 0 {
-		return fmt.Errorf("set interface %v to L2 ACL %v returned %v", ingressIfIdx, aclIndex, reply.Retval)
+		return fmt.Errorf("set interface %d to L2 ACL %d returned %d", ingressIfIdx, aclIndex, reply.Retval)
 	}

 	// Log MacipACLInterfaceAddDel time measurement results.
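Both hunks in this file tighten the same two-step error convention used throughout the ACL vppcalls: a non-nil error from ReceiveReply means the request itself failed in transport, while a non-zero Retval means VPP processed the message but rejected the operation. A condensed, self-contained sketch of that convention, reusing only message types that already appear in this patch series (the helper name is illustrative, not part of the patch):

package vppcalls

import (
	"fmt"

	govppapi "git.fd.io/govpp.git/api"
	aclapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl"
)

// deleteMacIPAclSketch mirrors the send/err/Retval pattern of the handlers
// above (compare DeleteMacIPAcl); it is an illustration, not plugin code.
func deleteMacIPAclSketch(ch govppapi.Channel, aclIndex uint32) error {
	req := &aclapi.MacipACLDel{ACLIndex: aclIndex}
	reply := &aclapi.MacipACLDelReply{}

	// Transport-level failure: the reply never arrived.
	if err := ch.SendRequest(req).ReceiveReply(reply); err != nil {
		return fmt.Errorf("failed to remove L2 ACL %d: %v", aclIndex, err)
	}
	// VPP received the request but returned a non-zero return value.
	if reply.Retval != 0 {
		return fmt.Errorf("remove L2 ACL %d returned %d", aclIndex, reply.Retval)
	}
	return nil
}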
@@ -108,10 +108,10 @@ func (handler *aclVppHandler) RemoveMacIPIngressACLFromInterfaces(removedACLInde

 	err := handler.callsChannel.SendRequest(req).ReceiveReply(reply)
 	if err != nil {
-		return fmt.Errorf("failed to remove L2 ACL %v from interface %v", removedACLIndex, ifIdx)
+		return fmt.Errorf("failed to remove L2 ACL %d from interface %d: %v", removedACLIndex, ifIdx, err)
 	}
 	if reply.Retval != 0 {
-		return fmt.Errorf("remove L2 ACL %v from interface %v returned error %v", removedACLIndex,
+		return fmt.Errorf("remove L2 ACL %d from interface %d returned error %d", removedACLIndex,
 			ifIdx, reply.Retval)
 	}

From c8a657286074650bdcb0bbc34640202e9a747825 Mon Sep 17 00:00:00 2001
From: Rastislav Szabo
Date: Fri, 20 Jul 2018 08:51:27 +0200
Subject: [PATCH 022/174] Fix incorrect setting of VRF in
 handleNat44IdentityMapping

Signed-off-by: Rastislav Szabo

---
 plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go
index 597fde73bf..10a3c64721 100644
--- a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go
+++ b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go
@@ -274,7 +274,7 @@ func handleNat44IdentityMapping(ctx *IdentityMappingContext, isAdd bool, vppChan
 			}
 			return ifIdx
 		}(ctx.IfIdx),
-		VrfID: ctx.IfIdx,
+		VrfID: ctx.Vrf,
 		IsAdd: boolToUint(isAdd),
 	}

From c40ff68cdec565dce7bacd06ad3adde48a5e64f6 Mon Sep 17 00:00:00 2001
From: Vladimir Lavor
Date: Fri, 20 Jul 2018 11:19:04 +0200
Subject: [PATCH 023/174] address comments

Signed-off-by: Vladimir Lavor

---
 plugins/govppmux/govpp_channel.go        | 19 +++++++++++--------
 plugins/govppmux/plugin_impl_govppmux.go | 24 +++++++++++++-----------
 2 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/plugins/govppmux/govpp_channel.go b/plugins/govppmux/govpp_channel.go
index 5c96500587..1623d8c717 100644
--- a/plugins/govppmux/govpp_channel.go
+++ b/plugins/govppmux/govpp_channel.go
@@ -48,15 +48,18 @@ func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error {
 	if err = r.requestCtx.ReceiveReply(reply); err != nil && err == core.ErrNotConnected && maxAttempts > 0 {
 		// Try to re-sent requests
 		for attemptIdx := 1; attemptIdx <= maxAttempts; attemptIdx++ {
-			logrus.DefaultLogger().Warnf("Retrying message %v, attempt: %d", r.requestMsg.GetMessageName(), attemptIdx)
-			if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err != nil && err == core.ErrNotConnected && attemptIdx != maxAttempts {
-				time.Sleep(timeout)
-				continue
-			} else if err != nil {
-				return err
-			} else {
-				return nil
+			// Wait, then try again
+			time.Sleep(timeout)
+			logrus.DefaultLogger().Warnf("Govppmux: retrying binary API message %v, attempt: %d",
+				r.requestMsg.GetMessageName(), attemptIdx)
+			if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err != nil {
+				if err == core.ErrNotConnected && attemptIdx != maxAttempts {
+					continue
+				} else {
+					return err
+				}
 			}
+			return nil
 		}
 	}

diff --git a/plugins/govppmux/plugin_impl_govppmux.go b/plugins/govppmux/plugin_impl_govppmux.go
index a66be391e1..a90f97edb6 100644
--- a/plugins/govppmux/plugin_impl_govppmux.go
+++ b/plugins/govppmux/plugin_impl_govppmux.go
@@ -45,9 +45,11 @@ type GOVPPPlugin struct {
 	vppAdapter adapter.VppAdapter
 	vppConChan chan govpp.ConnectionEvent

-	replyTimeout    time.Duration
-	reconnectResync bool
-	lastConnErr     error
+	replyTimeout        time.Duration
+	reconnectResync     bool
+	retryRequestCount   int
+	retryRequestTimeout time.Duration
+	lastConnErr         error

 	config *Config
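The fields added above are filled from the config file in the Init() hunk that follows, and every channel built by NewAPIChannel()/NewAPIChannelBuffered() then carries its own retryConfig copy. Nothing changes for callers, since the retry happens inside ReceiveReply. A rough consumer sketch under that assumption (the function and its wiring are illustrative and not part of this patch):

package govppmux_test

import (
	"github.com/ligato/vpp-agent/plugins/govppmux"
	aclapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl"
)

// pingAclPlugin shows that retry stays hidden behind govppapi.Channel:
// with retry-request-count > 0, a VPP disconnect during this call makes
// ReceiveReply re-send the request before any error surfaces.
func pingAclPlugin(mux *govppmux.GOVPPPlugin) error {
	ch, err := mux.NewAPIChannel()
	if err != nil {
		return err
	}
	defer ch.Close()

	req := &aclapi.ACLPluginGetVersion{}
	reply := &aclapi.ACLPluginGetVersionReply{}
	return ch.SendRequest(req).ReceiveReply(reply)
}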
@@ -123,6 +125,8 @@ func (plugin *GOVPPPlugin) Init() error {
 		plugin.replyTimeout = plugin.config.ReplyTimeout
 		plugin.reconnectResync = plugin.config.ReconnectResync
 		shmPrefix = plugin.config.ShmPrefix
+		plugin.retryRequestCount = plugin.config.RetryRequestCount
+		plugin.retryRequestTimeout = plugin.config.RetryRequestTimeout
 		plugin.Log.Debug("Setting govpp parameters", plugin.config)
 	}

@@ -188,10 +192,9 @@ func (plugin *GOVPPPlugin) NewAPIChannel() (govppapi.Channel, error) {
 	if plugin.replyTimeout > 0 {
 		ch.SetReplyTimeout(plugin.replyTimeout)
 	}
-	retryCfg := retryConfig{}
-	if plugin.config != nil {
-		retryCfg.attempts = plugin.config.RetryRequestCount
-		retryCfg.timeout = plugin.config.RetryRequestTimeout
+	retryCfg := retryConfig{
+		plugin.retryRequestCount,
+		plugin.retryRequestTimeout,
 	}
 	return &goVppChan{ch, retryCfg}, nil
 }
@@ -210,10 +213,9 @@ func (plugin *GOVPPPlugin) NewAPIChannelBuffered(reqChanBufSize, replyChanBufSiz
 	if plugin.replyTimeout > 0 {
 		ch.SetReplyTimeout(plugin.replyTimeout)
 	}
-	retryCfg := retryConfig{}
-	if plugin.config != nil {
-		retryCfg.attempts = plugin.config.RetryRequestCount
-		retryCfg.timeout = plugin.config.RetryRequestTimeout
+	retryCfg := retryConfig{
+		plugin.retryRequestCount,
+		plugin.retryRequestTimeout,
 	}
 	return &goVppChan{ch, retryCfg}, nil
 }

From e0d0c644d4636bce83d57f701aa5eff97bf677ef Mon Sep 17 00:00:00 2001
From: Rastislav Szabo
Date: Fri, 20 Jul 2018 12:45:04 +0200
Subject: [PATCH 024/174] Get rid of unnecessary check

Signed-off-by: Rastislav Szabo

---
 plugins/vpp/l3plugin/route_utils.go | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/plugins/vpp/l3plugin/route_utils.go b/plugins/vpp/l3plugin/route_utils.go
index 9e121f6c1d..dd05619ba7 100644
--- a/plugins/vpp/l3plugin/route_utils.go
+++ b/plugins/vpp/l3plugin/route_utils.go
@@ -97,11 +97,8 @@ func TransformRoute(routeInput *l3.StaticRoutes_Route, swIndex uint32, log loggi
 		return nil, nil
 	}
 	if routeInput.DstIpAddr == "" {
-		if routeInput.Type != l3.StaticRoutes_Route_INTER_VRF {
-			// no destination address is only allowed for inter-VRF routes
-			log.Infof("Route does not contain destination address")
-			return nil, nil
-		}
+		log.Infof("Route does not contain destination address")
+		return nil, nil
 	}
 	parsedDestIP, isIpv6, err := addrs.ParseIPWithPrefix(routeInput.DstIpAddr)
 	if err != nil {

From 06ebe92319e6a81ef54be07559cd3502fe9d8581 Mon Sep 17 00:00:00 2001
From: Rastislav Szabo
Date: Fri, 20 Jul 2018 13:32:28 +0200
Subject: [PATCH 025/174] Add inter-vrf route examples in vpp-agent-ctl

Signed-off-by: Rastislav Szabo

---
 cmd/vpp-agent-ctl/data_cmd.go | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/cmd/vpp-agent-ctl/data_cmd.go b/cmd/vpp-agent-ctl/data_cmd.go
index ac4d7316f1..c4f9d21f7d 100644
--- a/cmd/vpp-agent-ctl/data_cmd.go
+++ b/cmd/vpp-agent-ctl/data_cmd.go
@@ -942,11 +942,28 @@ func (ctl *VppAgentCtl) createRoute() {
 				Weight:            6,
 				OutgoingInterface: "tap1",
 			},
+			// inter-vrf route without next hop addr (recursive lookup)
+			//{
+			//	Type:      l3.StaticRoutes_Route_INTER_VRF,
+			//	VrfId:     0,
+			//	DstIpAddr: "1.2.3.4/32",
+			//	ViaVrfId:  1,
+			//},
+			// inter-vrf route with next hop addr
+			//{
+			//	Type:        l3.StaticRoutes_Route_INTER_VRF,
+			//	VrfId:       1,
+			//	DstIpAddr:   "10.1.1.3/32",
+			//	NextHopAddr: "192.168.1.13",
+			//	ViaVrfId:    0,
+			//},
 		},
 	}

-	ctl.Log.Print(routes.Routes[0])
-	ctl.broker.Put(l3.RouteKey(routes.Routes[0].VrfId, routes.Routes[0].DstIpAddr, routes.Routes[0].NextHopAddr), routes.Routes[0])
+	for _, r := range routes.Routes {
+		ctl.Log.Print(r)
+		ctl.broker.Put(l3.RouteKey(r.VrfId, r.DstIpAddr, r.NextHopAddr), r)
+	}
 }

 // DeleteRoute removes VPP route configuration from the ETCD
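The two commented-out entries above cover both inter-VRF flavors the new RouteType enables: without NextHopAddr, VPP performs a full recursive lookup in ViaVrfId; with NextHopAddr, forwarding is pinned to that next hop in the target VRF. The same objects can be built directly against the generated l3 model; a minimal standalone sketch, with values copied from the commented examples and the RouteKey call matching its use in data_cmd.go:

package main

import (
	"fmt"

	"github.com/ligato/vpp-agent/plugins/vpp/model/l3"
)

func main() {
	// Recursive inter-VRF route: no next hop, VPP resolves it in ViaVrfId.
	recursive := &l3.StaticRoutes_Route{
		Type:      l3.StaticRoutes_Route_INTER_VRF,
		VrfId:     0,
		DstIpAddr: "1.2.3.4/32",
		ViaVrfId:  1,
	}
	// Inter-VRF route with an explicit next hop in the target VRF.
	pinned := &l3.StaticRoutes_Route{
		Type:        l3.StaticRoutes_Route_INTER_VRF,
		VrfId:       1,
		DstIpAddr:   "10.1.1.3/32",
		NextHopAddr: "192.168.1.13",
		ViaVrfId:    0,
	}
	// Print the ETCD keys these routes would be stored under.
	for _, r := range []*l3.StaticRoutes_Route{recursive, pinned} {
		fmt.Println(l3.RouteKey(r.VrfId, r.DstIpAddr, r.NextHopAddr))
	}
}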
From af98a059a6add9a141c151b46a7887ef82bae5d3 Mon Sep 17 00:00:00 2001
From: Vladimir Lavor
Date: Fri, 20 Jul 2018 15:45:49 +0200
Subject: [PATCH 026/174] fixed some flaws

Signed-off-by: Vladimir Lavor

---
 plugins/govppmux/govpp_channel.go        |  5 ++--
 plugins/govppmux/plugin_impl_govppmux.go | 33 ++++++++----------------
 2 files changed, 13 insertions(+), 25 deletions(-)

diff --git a/plugins/govppmux/govpp_channel.go b/plugins/govppmux/govpp_channel.go
index 1623d8c717..72d319d1fc 100644
--- a/plugins/govppmux/govpp_channel.go
+++ b/plugins/govppmux/govpp_channel.go
@@ -53,11 +53,10 @@ func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error {
 			logrus.DefaultLogger().Warnf("Govppmux: retrying binary API message %v, attempt: %d",
 				r.requestMsg.GetMessageName(), attemptIdx)
 			if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err != nil {
-				if err == core.ErrNotConnected && attemptIdx != maxAttempts {
+				if err == core.ErrNotConnected {
 					continue
-				} else {
-					return err
 				}
+				return err
 			}
 			return nil
 		}

diff --git a/plugins/govppmux/plugin_impl_govppmux.go b/plugins/govppmux/plugin_impl_govppmux.go
index a90f97edb6..c91c09f69e 100644
--- a/plugins/govppmux/plugin_impl_govppmux.go
+++ b/plugins/govppmux/plugin_impl_govppmux.go
@@ -45,11 +45,7 @@ type GOVPPPlugin struct {
 	vppAdapter adapter.VppAdapter
 	vppConChan chan govpp.ConnectionEvent

-	replyTimeout        time.Duration
-	reconnectResync     bool
-	retryRequestCount   int
-	retryRequestTimeout time.Duration
-	lastConnErr         error
+	lastConnErr error

 	config *Config

@@ -117,21 +113,14 @@ func (plugin *GOVPPPlugin) Init() error {
 	if err != nil {
 		return err
 	}
-	var shmPrefix string
 	if found {
 		govpp.SetHealthCheckProbeInterval(plugin.config.HealthCheckProbeInterval)
 		govpp.SetHealthCheckReplyTimeout(plugin.config.HealthCheckReplyTimeout)
 		govpp.SetHealthCheckThreshold(plugin.config.HealthCheckThreshold)
-		plugin.replyTimeout = plugin.config.ReplyTimeout
-		plugin.reconnectResync = plugin.config.ReconnectResync
-		shmPrefix = plugin.config.ShmPrefix
-		plugin.retryRequestCount = plugin.config.RetryRequestCount
-		plugin.retryRequestTimeout = plugin.config.RetryRequestTimeout
-		plugin.Log.Debug("Setting govpp parameters", plugin.config)
 	}

 	if plugin.vppAdapter == nil {
-		plugin.vppAdapter = NewVppAdapter(shmPrefix)
+		plugin.vppAdapter = NewVppAdapter(plugin.config.ShmPrefix)
 	} else {
 		plugin.Log.Info("Reusing existing vppAdapter") //this is used for testing purposes
 	}
@@ -189,12 +178,12 @@ func (plugin *GOVPPPlugin) NewAPIChannel() (govppapi.Channel, error) {
 	if err != nil {
 		return nil, err
 	}
-	if plugin.replyTimeout > 0 {
-		ch.SetReplyTimeout(plugin.replyTimeout)
+	if plugin.config.ReplyTimeout > 0 {
+		ch.SetReplyTimeout(plugin.config.ReplyTimeout)
 	}
 	retryCfg := retryConfig{
-		plugin.retryRequestCount,
-		plugin.retryRequestTimeout,
+		plugin.config.RetryRequestCount,
+		plugin.config.RetryRequestTimeout,
 	}
 	return &goVppChan{ch, retryCfg}, nil
 }
@@ -210,12 +199,12 @@ func (plugin *GOVPPPlugin) NewAPIChannelBuffered(reqChanBufSize, replyChanBufSiz
 	if err != nil {
 		return nil, err
 	}
-	if plugin.replyTimeout > 0 {
-		ch.SetReplyTimeout(plugin.replyTimeout)
+	if plugin.config.ReplyTimeout > 0 {
+		ch.SetReplyTimeout(plugin.config.ReplyTimeout)
 	}
 	retryCfg := retryConfig{
-		plugin.retryRequestCount,
-		plugin.retryRequestTimeout,
+		plugin.config.RetryRequestCount,
+		plugin.config.RetryRequestTimeout,
} return &goVppChan{ch, retryCfg}, nil } @@ -230,7 +219,7 @@ func (plugin *GOVPPPlugin) handleVPPConnectionEvents(ctx context.Context) { case status := <-plugin.vppConChan: if status.State == govpp.Connected { plugin.retrieveVersion() - if plugin.reconnectResync && plugin.lastConnErr != nil { + if plugin.config.ReconnectResync && plugin.lastConnErr != nil { plugin.Log.Info("Starting resync after VPP reconnect") if plugin.Resync != nil { plugin.Resync.DoResync() From d0dfde8b2f6522e437d89df3e49e28c06ea12ead Mon Sep 17 00:00:00 2001 From: Rastislav Szabo Date: Fri, 20 Jul 2018 16:41:39 +0200 Subject: [PATCH 027/174] Make the NB API change backward compatible Signed-off-by: Rastislav Szabo --- plugins/vpp/model/l3/l3.pb.go | 115 +++++++++++++++++----------------- plugins/vpp/model/l3/l3.proto | 18 +++--- 2 files changed, 67 insertions(+), 66 deletions(-) diff --git a/plugins/vpp/model/l3/l3.pb.go b/plugins/vpp/model/l3/l3.pb.go index aeb07f404d..b82ef7769a 100644 --- a/plugins/vpp/model/l3/l3.pb.go +++ b/plugins/vpp/model/l3/l3.pb.go @@ -72,15 +72,16 @@ func (m *StaticRoutes) GetRoutes() []*StaticRoutes_Route { } type StaticRoutes_Route struct { - Type StaticRoutes_Route_RouteType `protobuf:"varint,1,opt,name=type,proto3,enum=l3.StaticRoutes_Route_RouteType" json:"type,omitempty"` - VrfId uint32 `protobuf:"varint,2,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - DstIpAddr string `protobuf:"bytes,4,opt,name=dst_ip_addr,json=dstIpAddr,proto3" json:"dst_ip_addr,omitempty"` - NextHopAddr string `protobuf:"bytes,5,opt,name=next_hop_addr,json=nextHopAddr,proto3" json:"next_hop_addr,omitempty"` - ViaVrfId uint32 `protobuf:"varint,6,opt,name=via_vrf_id,json=viaVrfId,proto3" json:"via_vrf_id,omitempty"` - OutgoingInterface string `protobuf:"bytes,7,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` - Weight uint32 `protobuf:"varint,9,opt,name=weight,proto3" json:"weight,omitempty"` - Preference uint32 `protobuf:"varint,10,opt,name=preference,proto3" json:"preference,omitempty"` + Type StaticRoutes_Route_RouteType `protobuf:"varint,10,opt,name=type,proto3,enum=l3.StaticRoutes_Route_RouteType" json:"type,omitempty"` + VrfId uint32 `protobuf:"varint,1,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + DstIpAddr string `protobuf:"bytes,3,opt,name=dst_ip_addr,json=dstIpAddr,proto3" json:"dst_ip_addr,omitempty"` + NextHopAddr string `protobuf:"bytes,4,opt,name=next_hop_addr,json=nextHopAddr,proto3" json:"next_hop_addr,omitempty"` + OutgoingInterface string `protobuf:"bytes,5,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` + Weight uint32 `protobuf:"varint,6,opt,name=weight,proto3" json:"weight,omitempty"` + Preference uint32 `protobuf:"varint,7,opt,name=preference,proto3" json:"preference,omitempty"` + // (a poor man's primary and backup) + ViaVrfId uint32 `protobuf:"varint,8,opt,name=via_vrf_id,json=viaVrfId,proto3" json:"via_vrf_id,omitempty"` } func (m *StaticRoutes_Route) Reset() { *m = StaticRoutes_Route{} } @@ -123,13 +124,6 @@ func (m *StaticRoutes_Route) GetNextHopAddr() string { return "" } -func (m *StaticRoutes_Route) GetViaVrfId() uint32 { - if m != nil { - return m.ViaVrfId - } - return 0 -} - func (m *StaticRoutes_Route) GetOutgoingInterface() string { if m != nil { return 
m.OutgoingInterface @@ -151,6 +145,13 @@ func (m *StaticRoutes_Route) GetPreference() uint32 { return 0 } +func (m *StaticRoutes_Route) GetViaVrfId() uint32 { + if m != nil { + return m.ViaVrfId + } + return 0 +} + // IP ARP entries type ArpTable struct { ArpEntries []*ArpTable_ArpEntry `protobuf:"bytes,1,rep,name=arp_entries,json=arpEntries" json:"arp_entries,omitempty"` @@ -400,45 +401,45 @@ func init() { func init() { proto.RegisterFile("l3.proto", fileDescriptorL3) } var fileDescriptorL3 = []byte{ - // 635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, - 0x10, 0xfe, 0x37, 0x6d, 0xdc, 0x78, 0xd2, 0xf4, 0x87, 0x15, 0x2d, 0xc6, 0x2a, 0x25, 0x58, 0x1c, - 0xc2, 0x01, 0x1f, 0x1a, 0xc4, 0x81, 0x8a, 0x43, 0x0e, 0x45, 0x44, 0x2a, 0x15, 0xda, 0x46, 0xbd, - 0x5a, 0x6e, 0xbc, 0x49, 0x57, 0x32, 0xf6, 0x6a, 0x77, 0x1b, 0x9a, 0x2b, 0xef, 0x00, 0x07, 0x5e, - 0x81, 0x07, 0xe1, 0x5d, 0x10, 0x8f, 0xc0, 0x01, 0x79, 0xbc, 0x76, 0x9c, 0x08, 0x10, 0x97, 0x78, - 0xbe, 0x99, 0x6f, 0xc6, 0x3b, 0xdf, 0x7e, 0x31, 0x74, 0xd2, 0x61, 0x28, 0x55, 0x6e, 0x72, 0xda, - 0x4a, 0x87, 0xc1, 0xd7, 0x2d, 0xd8, 0xbd, 0x30, 0xb1, 0x11, 0x53, 0x96, 0xdf, 0x18, 0xae, 0x69, - 0x08, 0x8e, 0xc2, 0xc8, 0x23, 0xfd, 0xad, 0x41, 0xf7, 0xf8, 0x20, 0x4c, 0x87, 0x61, 0x93, 0x11, - 0xe2, 0x83, 0x59, 0x96, 0xff, 0xbd, 0x05, 0x6d, 0xcc, 0xd0, 0xe7, 0xb0, 0x6d, 0x96, 0x92, 0x7b, - 0xa4, 0x4f, 0x06, 0x7b, 0xc7, 0xfd, 0xdf, 0xf7, 0x95, 0xbf, 0x93, 0xa5, 0xe4, 0x0c, 0xd9, 0x74, - 0x1f, 0x9c, 0x85, 0x9a, 0x45, 0x22, 0xf1, 0x5a, 0x7d, 0x32, 0xe8, 0xb1, 0xf6, 0x42, 0xcd, 0xc6, - 0x09, 0xed, 0x43, 0x37, 0xe1, 0x7a, 0xaa, 0x84, 0x34, 0x22, 0xcf, 0xbc, 0xad, 0x3e, 0x19, 0xb8, - 0xac, 0x99, 0xa2, 0x47, 0xd0, 0x4d, 0xb4, 0x89, 0x84, 0x8c, 0xe2, 0x24, 0x51, 0xde, 0x36, 0x32, - 0xdc, 0x44, 0x9b, 0xb1, 0x1c, 0x25, 0x89, 0xa2, 0x01, 0xf4, 0x32, 0x7e, 0x6b, 0xa2, 0xeb, 0xdc, - 0x32, 0xda, 0xe5, 0x8c, 0x22, 0xf9, 0x26, 0x2f, 0x39, 0x87, 0x00, 0x0b, 0x11, 0x47, 0xf6, 0x00, - 0x0e, 0x1e, 0xa0, 0xb3, 0x10, 0xf1, 0x25, 0x9e, 0xe1, 0x19, 0xd0, 0xfc, 0xc6, 0xcc, 0x73, 0x91, - 0xcd, 0x23, 0x91, 0x19, 0xae, 0x66, 0xf1, 0x94, 0x7b, 0x3b, 0x38, 0xe6, 0x6e, 0x55, 0x19, 0x57, - 0x05, 0x7a, 0x00, 0xce, 0x07, 0x2e, 0xe6, 0xd7, 0xc6, 0x73, 0x71, 0x90, 0x45, 0xf4, 0x08, 0x40, - 0x2a, 0x3e, 0xe3, 0x8a, 0x67, 0x53, 0xee, 0x01, 0xd6, 0x1a, 0x99, 0xe0, 0x29, 0xb8, 0xb5, 0x28, - 0xb4, 0x07, 0xee, 0xf8, 0x7c, 0xc2, 0x46, 0xd1, 0x25, 0x7b, 0x7d, 0xe7, 0x3f, 0x0b, 0x4f, 0x19, - 0x42, 0x12, 0x7c, 0x23, 0xd0, 0x19, 0x29, 0x39, 0x89, 0xaf, 0x52, 0x4e, 0x5f, 0x40, 0x37, 0x56, - 0x32, 0xe2, 0x99, 0x51, 0xa2, 0xbe, 0xae, 0xfd, 0x42, 0xf6, 0x8a, 0x52, 0x04, 0xa7, 0x99, 0x51, - 0x4b, 0x06, 0x71, 0x19, 0x09, 0xae, 0xfd, 0x8f, 0xe5, 0x10, 0x2c, 0xd0, 0x43, 0x70, 0x57, 0xab, - 0x91, 0x52, 0xc3, 0x3a, 0x41, 0x1f, 0x02, 0x58, 0x7d, 0xb9, 0xd6, 0x78, 0x41, 0x45, 0x19, 0xb5, - 0xe3, 0x5a, 0xd3, 0xc7, 0xb0, 0x2b, 0xaf, 0x97, 0xba, 0x26, 0xd8, 0x5b, 0x2a, 0x72, 0x15, 0xe5, - 0x00, 0x1c, 0x8d, 0x26, 0xc0, 0x0b, 0xea, 0x30, 0x8b, 0x82, 0x1f, 0x04, 0xf6, 0xde, 0xa9, 0xfc, - 0x76, 0x39, 0x52, 0x92, 0xc5, 0xd9, 0x9c, 0x6b, 0xfa, 0x0a, 0xba, 0xaa, 0x88, 0xa2, 0x54, 0x68, - 0x53, 0xed, 0x73, 0x58, 0xec, 0xb3, 0x4e, 0x0c, 0xf1, 0x71, 0x26, 0xb4, 0x61, 0xa0, 0xaa, 0x50, - 0xfb, 0x5f, 0x08, 0xb8, 0x75, 0x85, 0xde, 0x83, 0x76, 0x1a, 0x5f, 0xf1, 0xd4, 0xee, 0x54, 0x02, - 0xfa, 0x12, 0x1c, 0xec, 0x28, 0x76, 0x29, 0xa6, 0x07, 0x7f, 0x9b, 0x5e, 0x46, 0xcc, 0x76, 0xf8, - 0x27, 0xd0, 0xc6, 0x04, 0x7d, 0x00, 0x9d, 0x99, 0x50, 
0x68, 0x3d, 0x3b, 0x7d, 0x07, 0xf1, 0x58, - 0xd2, 0xfb, 0xb0, 0x93, 0xc6, 0x65, 0xa5, 0x14, 0xcb, 0x29, 0xe0, 0x58, 0x06, 0x3f, 0x09, 0xd0, - 0xea, 0x3d, 0xb5, 0x63, 0x34, 0x7d, 0x0b, 0xff, 0xd7, 0x62, 0xaf, 0xad, 0xfd, 0xa4, 0x79, 0xb0, - 0x55, 0x43, 0x58, 0x87, 0xb8, 0xfe, 0x9e, 0x68, 0x42, 0xed, 0x7f, 0x26, 0xd0, 0x5b, 0x63, 0xfc, - 0x41, 0x86, 0x73, 0x80, 0xba, 0xb3, 0x92, 0x22, 0xfc, 0x97, 0x37, 0xae, 0x10, 0x6b, 0x4c, 0xf0, - 0x1f, 0x81, 0xbb, 0xfa, 0x1b, 0x50, 0xd8, 0xce, 0xe2, 0xf7, 0x95, 0x99, 0x30, 0x0e, 0x3e, 0x11, - 0xe8, 0x5c, 0x4c, 0xce, 0x4b, 0xdf, 0x9e, 0x40, 0x57, 0x9b, 0x6c, 0xc3, 0xb7, 0x3e, 0x7e, 0x2e, - 0x2c, 0xa5, 0x0e, 0xac, 0x79, 0xb5, 0xc9, 0x2a, 0xf3, 0x9e, 0x41, 0x6f, 0xad, 0xb8, 0x61, 0x51, - 0xb2, 0x69, 0xd1, 0x35, 0x7f, 0xb7, 0x36, 0xfc, 0x7d, 0xe5, 0xe0, 0x87, 0x70, 0xf8, 0x2b, 0x00, - 0x00, 0xff, 0xff, 0xc4, 0xdc, 0x1a, 0x5c, 0x14, 0x05, 0x00, 0x00, + // 636 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0x26, 0x6d, 0x5c, 0xfb, 0xba, 0xe9, 0x07, 0x23, 0x5a, 0x8c, 0x55, 0x4a, 0xb0, 0x58, + 0x84, 0x05, 0x5e, 0x34, 0x88, 0x05, 0x15, 0x8b, 0x2c, 0x8a, 0x88, 0x54, 0x2a, 0x34, 0x8d, 0xba, + 0xb5, 0xdc, 0x78, 0x92, 0x8e, 0x64, 0xec, 0xd1, 0xcc, 0x34, 0x34, 0x5b, 0xde, 0x01, 0x16, 0xbc, + 0x02, 0x0f, 0xc2, 0xbb, 0x20, 0x1e, 0x81, 0x05, 0xf2, 0xcc, 0xd8, 0x71, 0x22, 0x40, 0x6c, 0xe2, + 0xfb, 0x73, 0xee, 0xb5, 0xcf, 0x99, 0x93, 0x01, 0x37, 0x1f, 0xc6, 0x5c, 0x94, 0xaa, 0xc4, 0x9d, + 0x7c, 0x18, 0x7d, 0xdd, 0x82, 0xdd, 0x0b, 0x95, 0x2a, 0x36, 0x25, 0xe5, 0x8d, 0xa2, 0x12, 0xc7, + 0xe0, 0x08, 0x1d, 0x05, 0xa8, 0xbf, 0x35, 0xf0, 0x8f, 0x0f, 0xe2, 0x7c, 0x18, 0xb7, 0x11, 0xb1, + 0x7e, 0x10, 0x8b, 0x0a, 0xbf, 0x77, 0xa0, 0xab, 0x2b, 0xf8, 0x39, 0x6c, 0xab, 0x25, 0xa7, 0x01, + 0xf4, 0xd1, 0x60, 0xef, 0xb8, 0xff, 0xfb, 0x39, 0xf3, 0x3b, 0x59, 0x72, 0x4a, 0x34, 0x1a, 0xef, + 0x83, 0xb3, 0x10, 0xb3, 0x84, 0x65, 0x01, 0xea, 0xa3, 0x41, 0x8f, 0x74, 0x17, 0x62, 0x36, 0xce, + 0x70, 0x1f, 0xfc, 0x8c, 0xca, 0xa9, 0x60, 0x5c, 0xb1, 0xb2, 0x08, 0x3a, 0x7d, 0x34, 0xf0, 0x48, + 0xbb, 0x84, 0x8f, 0xc0, 0xcf, 0xa4, 0x4a, 0x18, 0x4f, 0xd2, 0x2c, 0x13, 0xc1, 0x96, 0x46, 0x78, + 0x99, 0x54, 0x63, 0x3e, 0xca, 0x32, 0x81, 0x23, 0xe8, 0x15, 0xf4, 0x56, 0x25, 0xd7, 0xa5, 0x45, + 0x6c, 0x9b, 0x1d, 0x55, 0xf1, 0x4d, 0x69, 0x30, 0xcf, 0x00, 0x97, 0x37, 0x6a, 0x5e, 0xb2, 0x62, + 0x9e, 0xb0, 0x42, 0x51, 0x31, 0x4b, 0xa7, 0x34, 0xe8, 0x6a, 0xe0, 0xdd, 0xba, 0x33, 0xae, 0x1b, + 0xf8, 0x00, 0x9c, 0x0f, 0x94, 0xcd, 0xaf, 0x55, 0xe0, 0xe8, 0x6f, 0xb5, 0x19, 0x3e, 0x02, 0xe0, + 0x82, 0xce, 0xa8, 0xa0, 0xc5, 0x94, 0x06, 0x3b, 0xba, 0xd7, 0xaa, 0xe0, 0x43, 0x80, 0x05, 0x4b, + 0x13, 0xcb, 0xd3, 0xd5, 0x7d, 0x77, 0xc1, 0xd2, 0xcb, 0x8a, 0x6a, 0xf4, 0x14, 0xbc, 0x46, 0x14, + 0xdc, 0x03, 0x6f, 0x7c, 0x3e, 0x21, 0xa3, 0xe4, 0x92, 0xbc, 0xbe, 0xf3, 0x9f, 0x4d, 0x4f, 0x89, + 0x4e, 0x51, 0xf4, 0x0d, 0x81, 0x3b, 0x12, 0x7c, 0x92, 0x5e, 0xe5, 0x14, 0xbf, 0x00, 0x3f, 0x15, + 0x3c, 0xa1, 0x85, 0x12, 0xac, 0x39, 0xae, 0xfd, 0x4a, 0xf6, 0x1a, 0x52, 0x05, 0xa7, 0x85, 0x12, + 0x4b, 0x02, 0xa9, 0x89, 0x18, 0x95, 0xe1, 0x47, 0xb3, 0x44, 0x37, 0xf0, 0x21, 0x78, 0x2b, 0xe2, + 0xc8, 0x68, 0xd8, 0x14, 0xf0, 0x43, 0x00, 0xab, 0x2f, 0x95, 0xd2, 0x1e, 0x82, 0xc7, 0xb4, 0x76, + 0x54, 0x4a, 0xfc, 0x18, 0x76, 0xf9, 0xf5, 0x52, 0x36, 0x00, 0x73, 0x06, 0x7e, 0x55, 0xab, 0x21, + 0x07, 0xe0, 0x48, 0x6d, 0x02, 0x2d, 0xbf, 0x4b, 0x6c, 0x16, 0xfd, 0x40, 0xb0, 0xf7, 0x4e, 0x94, + 0xb7, 0xcb, 0x91, 0xe0, 0x24, 0x2d, 0xe6, 0x54, 0xe2, 0x57, 
0xe0, 0x8b, 0x2a, 0x4a, 0x72, 0x26, + 0x55, 0xcd, 0xe7, 0xb0, 0xe2, 0xb3, 0x0e, 0x8c, 0xf5, 0xe3, 0x8c, 0x49, 0x45, 0x40, 0xd4, 0xa1, + 0x0c, 0xbf, 0x20, 0xf0, 0x9a, 0x0e, 0xbe, 0x07, 0xdd, 0x3c, 0xbd, 0xa2, 0xb9, 0xe5, 0x64, 0x12, + 0xfc, 0x12, 0x1c, 0x3d, 0x51, 0x71, 0xa9, 0xb6, 0x47, 0x7f, 0xdb, 0x6e, 0x22, 0x62, 0x27, 0xc2, + 0x13, 0xe8, 0xea, 0x02, 0x7e, 0x00, 0xee, 0x8c, 0x09, 0x6d, 0x3d, 0xbb, 0x7d, 0x47, 0xe7, 0x63, + 0x8e, 0xef, 0xc3, 0x4e, 0x9e, 0x9a, 0x8e, 0x11, 0xcb, 0xa9, 0xd2, 0x31, 0x8f, 0x7e, 0x22, 0xc0, + 0xf5, 0x7b, 0x1a, 0x3f, 0x49, 0xfc, 0x16, 0xfe, 0x6f, 0xc4, 0x5e, 0xa3, 0xfd, 0xa4, 0xfd, 0x61, + 0xab, 0x81, 0xb8, 0x09, 0x35, 0xfd, 0x3d, 0xd6, 0x4e, 0x65, 0xf8, 0x19, 0x41, 0x6f, 0x0d, 0xf1, + 0x07, 0x19, 0xce, 0x01, 0x9a, 0xc9, 0x5a, 0x8a, 0xf8, 0x5f, 0xde, 0xb8, 0xca, 0x48, 0x6b, 0x43, + 0xf8, 0x08, 0xbc, 0xd5, 0x9f, 0x04, 0xc3, 0x76, 0x91, 0xbe, 0xaf, 0xcd, 0xa4, 0xe3, 0xe8, 0x13, + 0x02, 0xf7, 0x62, 0x72, 0x6e, 0x7c, 0x7b, 0x02, 0xbe, 0x54, 0xc5, 0x86, 0x6f, 0x43, 0x7d, 0x5d, + 0x58, 0x48, 0x13, 0x58, 0xf3, 0x4a, 0x55, 0xd4, 0xe6, 0x3d, 0x83, 0xde, 0x5a, 0x73, 0xc3, 0xa2, + 0x68, 0xd3, 0xa2, 0x6b, 0xfe, 0xee, 0x6c, 0xf8, 0xfb, 0xca, 0xd1, 0x17, 0xe1, 0xf0, 0x57, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x41, 0x97, 0x51, 0xa6, 0x14, 0x05, 0x00, 0x00, } diff --git a/plugins/vpp/model/l3/l3.proto b/plugins/vpp/model/l3/l3.proto index 0662b27055..6a1664c7ef 100644 --- a/plugins/vpp/model/l3/l3.proto +++ b/plugins/vpp/model/l3/l3.proto @@ -14,19 +14,19 @@ message StaticRoutes { does not have to be specified either, in that case VPP does full recursive lookup in the via_vrf_id VRF. */ } - RouteType type = 1; - uint32 vrf_id = 2; /* VRF identifier, field required for remote client. This value should be + RouteType type = 10; + uint32 vrf_id = 1; /* VRF identifier, field required for remote client. This value should be consistent with VRF ID in static route key. If it is not, value from key will be preffered and this field will be overriden. */ - string description = 3; /* optional description */ - string dst_ip_addr = 4; /* ip address + prefix in format
<address>/<prefix> */
-    string next_hop_addr = 5;            /* next hop address */
-    uint32 via_vrf_id = 6;               /* Specifies VRF ID for the next hop lookup / recursive lookup */
-    string outgoing_interface = 7;       /* outgoing interface name */
-    uint32 weight = 9;                   /* weight (used for unequal cost load balncing) */
-    uint32 preference = 10;              /* The preference of the path. Lowest preference is preferred. */
+    string description = 2;            /* optional description */
+    string dst_ip_addr = 3;            /* ip address + prefix in format
/ */ + string next_hop_addr = 4; /* next hop address */ + string outgoing_interface = 5; /* outgoing interface name */ + uint32 weight = 6; /* weight (used for unequal cost load balncing) */ + uint32 preference = 7; /* The preference of the path. Lowest preference is preferred. */ /* Only paths with the best preference contribute to forwarding. */ /* (a poor man's primary and backup) */ + uint32 via_vrf_id = 8; /* Specifies VRF ID for the next hop lookup / recursive lookup */ } repeated Route routes = 1; /* list of IP static routes */ } From ed0097174ecc0ed5fba2490ab1f488137e6ba051 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 23 Jul 2018 09:17:01 +0200 Subject: [PATCH 028/174] simplify Signed-off-by: Vladimir Lavor --- plugins/govppmux/govpp_channel.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/plugins/govppmux/govpp_channel.go b/plugins/govppmux/govpp_channel.go index 72d319d1fc..cdd5865ea1 100644 --- a/plugins/govppmux/govpp_channel.go +++ b/plugins/govppmux/govpp_channel.go @@ -52,13 +52,10 @@ func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error { time.Sleep(timeout) logrus.DefaultLogger().Warnf("Govppmux: retrying binary API message %v, attempt: %d", r.requestMsg.GetMessageName(), attemptIdx) - if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err != nil { - if err == core.ErrNotConnected { - continue - } - return err + if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err == core.ErrNotConnected { + continue } - return nil + return err } } From dd9140d816ec228457587f8debedc08d64c6b174 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 23 Jul 2018 09:31:56 +0200 Subject: [PATCH 029/174] Update govpp_channel.go --- plugins/govppmux/govpp_channel.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/govppmux/govpp_channel.go b/plugins/govppmux/govpp_channel.go index cdd5865ea1..aacde6674c 100644 --- a/plugins/govppmux/govpp_channel.go +++ b/plugins/govppmux/govpp_channel.go @@ -45,7 +45,7 @@ func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error { var err error // Receive reply from original send - if err = r.requestCtx.ReceiveReply(reply); err != nil && err == core.ErrNotConnected && maxAttempts > 0 { + if err = r.requestCtx.ReceiveReply(reply); err == core.ErrNotConnected && maxAttempts > 0 { // Try to re-sent requests for attemptIdx := 1; attemptIdx <= maxAttempts; attemptIdx++ { // Wait, then try again From ed190aeffaf1d540afda31bbcaace371bcdad1cf Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Fri, 6 Jul 2018 14:50:02 +0200 Subject: [PATCH 030/174] interface plugin vppcalls api Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 3 +- plugins/vpp/ifplugin/afpacket_config.go | 19 +- plugins/vpp/ifplugin/afpacket_config_test.go | 18 +- plugins/vpp/ifplugin/bfd_config.go | 54 ++-- plugins/vpp/ifplugin/bfd_config_test.go | 7 +- plugins/vpp/ifplugin/data_resync.go | 16 +- plugins/vpp/ifplugin/data_resync_test.go | 7 +- plugins/vpp/ifplugin/interface_config.go | 105 ++++---- plugins/vpp/ifplugin/interface_config_test.go | 7 +- plugins/vpp/ifplugin/nat_config.go | 51 ++-- plugins/vpp/ifplugin/nat_config_test.go | 7 +- plugins/vpp/ifplugin/stn_config.go | 26 +- plugins/vpp/ifplugin/stn_config_test.go | 7 +- .../vpp/ifplugin/vppcalls/admin_vppcalls.go | 55 ++-- .../ifplugin/vppcalls/admin_vppcalls_test.go | 57 ++-- .../ifplugin/vppcalls/afpacket_vppcalls.go | 21 +- .../vppcalls/afpacket_vppcalls_test.go | 44 ++-- plugins/vpp/ifplugin/vppcalls/api_vppcalls.go 
| 247 +++++++++++++++++ plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go | 91 +++---- .../ifplugin/vppcalls/bfd_vppcalls_test.go | 248 +++++++++--------- .../vpp/ifplugin/vppcalls/compat_vppcalls.go | 25 +- .../vpp/ifplugin/vppcalls/dhcp_vppcalls.go | 18 +- .../ifplugin/vppcalls/dhcp_vppcalls_test.go | 26 +- plugins/vpp/ifplugin/vppcalls/doc.go | 2 +- .../dump_interface_vppcalls.go} | 136 ++++------ .../dump_interface_vppcalls_test.go} | 56 ++-- .../dump_nat_vppcalls.go | 167 ++++++------ .../dump_nat_vppcalls_test.go | 23 +- .../dump_stn_vppcalls.go | 11 +- .../vppcalls/ip_container_vppcalls.go | 16 +- .../vppcalls/ip_container_vppcalls_test.go | 38 ++- plugins/vpp/ifplugin/vppcalls/ip_vppcalls.go | 34 +-- .../vpp/ifplugin/vppcalls/ip_vppcalls_test.go | 70 +++-- .../ifplugin/vppcalls/loopback_vppcalls.go | 20 +- .../vppcalls/loopback_vppcalls_test.go | 26 +- plugins/vpp/ifplugin/vppcalls/mac_vppcalls.go | 9 +- .../ifplugin/vppcalls/mac_vppcalls_test.go | 19 +- .../vpp/ifplugin/vppcalls/memif_vppcalls.go | 27 +- .../ifplugin/vppcalls/memif_vppcalls_test.go | 50 ++-- plugins/vpp/ifplugin/vppcalls/mtu_vppcalls.go | 9 +- .../ifplugin/vppcalls/mtu_vppcalls_test.go | 14 +- plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go | 110 ++++---- .../ifplugin/vppcalls/nat_vppcalls_test.go | 145 +++++----- .../vpp/ifplugin/vppcalls/rx_mode_vppcalls.go | 9 +- .../vppcalls/rx_mode_vppcalls_test.go | 20 +- .../vppcalls/rx_placement_vppcalls.go | 13 +- .../vppcalls/rx_placement_vppcalls_test.go | 26 +- plugins/vpp/ifplugin/vppcalls/stn_vppcalls.go | 18 +- .../ifplugin/vppcalls/stn_vppcalls_test.go | 33 ++- plugins/vpp/ifplugin/vppcalls/tap_vppcalls.go | 24 +- .../ifplugin/vppcalls/tap_vppcalls_test.go | 46 ++-- plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go | 36 ++- .../ifplugin/vppcalls/vrf_vppcalls_test.go | 43 ++- .../vpp/ifplugin/vppcalls/vxlan_vppcalls.go | 25 +- .../ifplugin/vppcalls/vxlan_vppcalls_test.go | 69 +++-- plugins/vpp/ifplugin/vppdump/doc.go | 2 - plugins/vpp/ipsecplugin/ipsec_config.go | 12 +- plugins/vpp/l2plugin/bd_config.go | 7 + plugins/vpp/l2plugin/data_resync.go | 3 +- .../vpp/l3plugin/vppcalls/route_vppcalls.go | 4 +- plugins/vpp/plugin_impl_vpp.go | 8 +- 61 files changed, 1337 insertions(+), 1202 deletions(-) create mode 100644 plugins/vpp/ifplugin/vppcalls/api_vppcalls.go rename plugins/vpp/ifplugin/{vppdump/dump_vppcalls.go => vppcalls/dump_interface_vppcalls.go} (72%) rename plugins/vpp/ifplugin/{vppdump/dump_vppcalls_test.go => vppcalls/dump_interface_vppcalls_test.go} (90%) rename plugins/vpp/ifplugin/{vppdump => vppcalls}/dump_nat_vppcalls.go (60%) rename plugins/vpp/ifplugin/{vppdump => vppcalls}/dump_nat_vppcalls_test.go (78%) rename plugins/vpp/ifplugin/{vppdump => vppcalls}/dump_stn_vppcalls.go (70%) delete mode 100644 plugins/vpp/ifplugin/vppdump/doc.go diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 45a4f1e1b4..f0ecac86ed 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -51,7 +51,8 @@ func (plugin *Plugin) interfacesGetHandler(formatter *render.Render) http.Handle } defer ch.Close() - res, err := ifplugin.DumpInterfaces(plugin.Log, ch, nil) + ifHandler := ifcalls.NewIfVppHandler(ch, plugin.Log, nil) + res, err := ifHandler.DumpInterfaces() if err != nil { plugin.Log.Errorf("Error: %v", err) formatter.JSON(w, http.StatusInternalServerError, err) diff --git a/plugins/vpp/ifplugin/afpacket_config.go b/plugins/vpp/ifplugin/afpacket_config.go index c93a3d9308..ec265825ff 100644 --- 
a/plugins/vpp/ifplugin/afpacket_config.go +++ b/plugins/vpp/ifplugin/afpacket_config.go @@ -19,7 +19,6 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" @@ -37,8 +36,7 @@ type AFPacketConfigurator struct { afPacketByName map[string]*AfPacketConfig // af packet name -> Af Packet interface configuration linuxHostInterfaces map[string]struct{} // a set of available host (Linux) interfaces - vppCh govppapi.Channel // govpp channel used by InterfaceConfigurator - stopwatch *measure.Stopwatch // from InterfaceConfigurator + ifHandler vppcalls.IfVppAPI // govpp channel used by InterfaceConfigurator } // AfPacketConfig wraps the proto formatted configuration of an Afpacket interface together with a flag @@ -73,13 +71,13 @@ func (plugin *AFPacketConfigurator) GetHostInterfacesEntry(hostIf string) bool { } // Init members of AFPacketConfigurator. -func (plugin *AFPacketConfigurator) Init(logger logging.Logger, vppCh govppapi.Channel, linux interface{}, - indexes ifaceidx.SwIfIndexRW, stopwatch *measure.Stopwatch) (err error) { +func (plugin *AFPacketConfigurator) Init(logger logging.Logger, ifHandler vppcalls.IfVppAPI, linux interface{}, + indexes ifaceidx.SwIfIndexRW) (err error) { plugin.log = logger plugin.log.Infof("Initializing AF-Packet configurator") - // VPP channel - plugin.vppCh = vppCh + // VPP API handler + plugin.ifHandler = ifHandler // Linux plugin.linux = linux @@ -90,9 +88,6 @@ func (plugin *AFPacketConfigurator) Init(logger logging.Logger, vppCh govppapi.C plugin.afPacketByName = make(map[string]*AfPacketConfig) plugin.linuxHostInterfaces = make(map[string]struct{}) - // Stopwatch - plugin.stopwatch = stopwatch - return nil } @@ -115,7 +110,7 @@ func (plugin *AFPacketConfigurator) ConfigureAfPacketInterface(afpacket *intf.In return 0, true, nil } } - swIdx, err := vppcalls.AddAfPacketInterface(afpacket.Name, afpacket.PhysAddress, afpacket.Afpacket, plugin.vppCh, plugin.stopwatch) + swIdx, err := plugin.ifHandler.AddAfPacketInterface(afpacket.Name, afpacket.PhysAddress, afpacket.Afpacket) if err != nil { plugin.addToCache(afpacket, true) return 0, true, err @@ -156,7 +151,7 @@ func (plugin *AFPacketConfigurator) DeleteAfPacketInterface(afpacket *intf.Inter config, found := plugin.afPacketByName[afpacket.Name] if !found || !config.pending { - err = vppcalls.DeleteAfPacketInterface(afpacket.Name, ifIdx, afpacket.GetAfpacket(), plugin.vppCh, plugin.stopwatch) + err = plugin.ifHandler.DeleteAfPacketInterface(afpacket.Name, ifIdx, afpacket.GetAfpacket()) // unregister interface to let other plugins know that it is removed from the vpp plugin.ifIndexes.UnregisterName(afpacket.Name) } diff --git a/plugins/vpp/ifplugin/afpacket_config_test.go b/plugins/vpp/ifplugin/afpacket_config_test.go index e34660f716..00c12fe1b0 100644 --- a/plugins/vpp/ifplugin/afpacket_config_test.go +++ b/plugins/vpp/ifplugin/afpacket_config_test.go @@ -21,12 +21,14 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/idxvpp/nametoidx" ap_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" if_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" 
"github.com/ligato/vpp-agent/plugins/vpp/ifplugin" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" @@ -42,7 +44,9 @@ func TestAfPacketConfiguratorInit(t *testing.T) { plugin := &ifplugin.AFPacketConfigurator{} vppCh, err := connection.NewAPIChannel() Expect(err).To(BeNil()) - err = plugin.Init(logrus.DefaultLogger(), vppCh, struct{}{}, nil, nil) + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) + ifHandler := vppcalls.NewIfVppHandler(vppCh, logrus.DefaultLogger(), stopwatch) + err = plugin.Init(logrus.DefaultLogger(), ifHandler, struct{}{}, nil) Expect(err).To(BeNil()) connection.Disconnect() } @@ -407,7 +411,9 @@ func TestAfPacketNewLinuxInterfaceNoLinux(t *testing.T) { swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "afpacket", nil)) // Configurator plugin := &ifplugin.AFPacketConfigurator{} - err := plugin.Init(log, ctx.MockChannel, nil, swIfIndices, nil) + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) + ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + err := plugin.Init(log, ifHandler, nil, swIfIndices) Expect(err).To(BeNil()) // Test registered linux interface config := plugin.ResolveCreatedLinuxInterface("host1", "host1", 1) @@ -462,7 +468,9 @@ func TestAfPacketDeleteLinuxInterfaceNoLinux(t *testing.T) { swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "afpacket", nil)) // Configurator plugin := &ifplugin.AFPacketConfigurator{} - err := plugin.Init(log, ctx.MockChannel, nil, swIfIndices, nil) + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) + ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + err := plugin.Init(log, ifHandler, nil, swIfIndices) Expect(err).To(BeNil()) // Prepare plugin.ResolveCreatedLinuxInterface("host1", "host1", 1) @@ -513,7 +521,9 @@ func afPacketTestSetup(t *testing.T) (*vppcallmock.TestCtx, *ifplugin.AFPacketCo swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "afpacket", nil)) // Configurator plugin := &ifplugin.AFPacketConfigurator{} - err := plugin.Init(log, ctx.MockChannel, struct{}{}, swIfIndices, nil) + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) + ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + err := plugin.Init(log, ifHandler, struct{}{}, swIfIndices) Expect(err).To(BeNil()) return ctx, plugin, swIfIndices diff --git a/plugins/vpp/ifplugin/bfd_config.go b/plugins/vpp/ifplugin/bfd_config.go index 0f31fd9fe0..9dbe4a497d 100644 --- a/plugins/vpp/ifplugin/bfd_config.go +++ b/plugins/vpp/ifplugin/bfd_config.go @@ -50,16 +50,22 @@ type BFDConfigurator struct { keysIndexes idxvpp.NameToIdxRW echoFunctionIndex idxvpp.NameToIdxRW - vppChan govppapi.Channel + vppChan govppapi.VPPChannel + + // VPP API handler + bfdHandler vppcalls.BfdVppAPI } // Init members and channels func (plugin *BFDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, - enableStopwatch bool) (err error) { + stopwatch *measure.Stopwatch) (err error) { // Logger plugin.log = logger.NewLogger("-bfd-conf") plugin.log.Infof("Initializing BFD configurator") + // Stopwatch + plugin.stopwatch = stopwatch + // Mappings plugin.ifIndexes = swIfIndexes plugin.sessionsIndexes = nametoidx.NewNameToIdx(plugin.log, 
"bfd_session_indexes", nil) @@ -72,12 +78,10 @@ func (plugin *BFDConfigurator) Init(logger logging.PluginLogger, goVppMux govppm return err } - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("BFDConfigurator", plugin.log) - } + // VPP API handler + plugin.bfdHandler = vppcalls.NewBfdVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch) - if err = vppcalls.CheckMsgCompatibilityForBfd(plugin.vppChan); err != nil { + if err = plugin.bfdHandler.CheckMsgCompatibilityForBfd(); err != nil { plugin.log.Error(err) return err } @@ -146,7 +150,7 @@ func (plugin *BFDConfigurator) ConfigureBfdSession(bfdInput *bfd.SingleHopBFD_Se } // Call vpp api - err := vppcalls.AddBfdUDPSession(bfdInput, ifIdx, plugin.keysIndexes, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.AddBfdUDPSession(bfdInput, ifIdx, plugin.keysIndexes) if err != nil { return fmt.Errorf("error while configuring BFD for interface %v", bfdInput.Interface) } @@ -197,7 +201,7 @@ func (plugin *BFDConfigurator) ModifyBfdSession(oldBfdInput *bfd.SingleHopBFD_Se _, _, found = plugin.sessionsIndexes.LookupIdx(oldBfdInput.Interface) if !found { plugin.log.Printf("Previous BFD session does not exist, creating a new one for interface %v", newBfdInput.Interface) - err := vppcalls.AddBfdUDPSession(newBfdInput, ifIdx, plugin.keysIndexes, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.AddBfdUDPSession(newBfdInput, ifIdx, plugin.keysIndexes) if err != nil { return err } @@ -210,7 +214,7 @@ func (plugin *BFDConfigurator) ModifyBfdSession(oldBfdInput *bfd.SingleHopBFD_Se return fmt.Errorf("unable to modify BFD session, adresses does not match. Odl session source: %v, dest: %v, new session source: %v, dest: %v", oldBfdInput.SourceAddress, oldBfdInput.DestinationAddress, newBfdInput.SourceAddress, newBfdInput.DestinationAddress) } - err := vppcalls.ModifyBfdUDPSession(newBfdInput, plugin.ifIndexes, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.ModifyBfdUDPSession(newBfdInput, plugin.ifIndexes) if err != nil { return err } @@ -230,7 +234,7 @@ func (plugin *BFDConfigurator) DeleteBfdSession(bfdInput *bfd.SingleHopBFD_Sessi return fmt.Errorf("cannot remove BFD session, interface %s not found", bfdInput.Interface) } - err := vppcalls.DeleteBfdUDPSession(ifIndex, bfdInput.SourceAddress, bfdInput.DestinationAddress, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.DeleteBfdUDPSession(ifIndex, bfdInput.SourceAddress, bfdInput.DestinationAddress) if err != nil { return fmt.Errorf("error while deleting BFD for interface %v", bfdInput.Interface) } @@ -245,7 +249,7 @@ func (plugin *BFDConfigurator) DeleteBfdSession(bfdInput *bfd.SingleHopBFD_Sessi func (plugin *BFDConfigurator) DumpBfdSessions() ([]*bfd.SingleHopBFD_Session, error) { var bfdSessionList []*bfd.SingleHopBFD_Session - bfdList, err := vppcalls.DumpBfdUDPSessions(plugin.vppChan, plugin.stopwatch) + bfdList, err := plugin.bfdHandler.DumpBfdUDPSessions() if err != nil { return bfdSessionList, err } @@ -284,7 +288,7 @@ func (plugin *BFDConfigurator) DumpBfdSessions() ([]*bfd.SingleHopBFD_Session, e func (plugin *BFDConfigurator) ConfigureBfdAuthKey(bfdAuthKey *bfd.SingleHopBFD_Key) error { plugin.log.Infof("Configuring BFD authentication key with ID %v", bfdAuthKey.Id) - err := vppcalls.SetBfdUDPAuthenticationKey(bfdAuthKey, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.SetBfdUDPAuthenticationKey(bfdAuthKey) if err != nil { return fmt.Errorf("error while setting up BFD auth 
key with ID %v", bfdAuthKey.Id) } @@ -304,7 +308,7 @@ func (plugin *BFDConfigurator) ModifyBfdAuthKey(oldInput *bfd.SingleHopBFD_Key, plugin.log.Infof("Modifying BFD auth key for ID %d", oldInput.Id) // Check that this auth key is not used in any session - sessionList, err := vppcalls.DumpBfdUDPSessionsWithID(newInput.Id, plugin.vppChan, plugin.stopwatch) + sessionList, err := plugin.bfdHandler.DumpBfdUDPSessionsWithID(newInput.Id) if err != nil { return fmt.Errorf("error while verifying authentication key usage. Id: %d: %v", oldInput.Id, err) } @@ -313,7 +317,7 @@ func (plugin *BFDConfigurator) ModifyBfdAuthKey(oldInput *bfd.SingleHopBFD_Key, for _, bfds := range sessionList { sourceAddr := net.HardwareAddr(bfds.LocalAddr).String() destAddr := net.HardwareAddr(bfds.PeerAddr).String() - err := vppcalls.DeleteBfdUDPSession(bfds.SwIfIndex, sourceAddr, destAddr, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.DeleteBfdUDPSession(bfds.SwIfIndex, sourceAddr, destAddr) if err != nil { return err } @@ -321,11 +325,11 @@ func (plugin *BFDConfigurator) ModifyBfdAuthKey(oldInput *bfd.SingleHopBFD_Key, plugin.log.Debugf("%v session(s) temporary removed", len(sessionList)) } - err = vppcalls.DeleteBfdUDPAuthenticationKey(oldInput, plugin.vppChan, plugin.stopwatch) + err = plugin.bfdHandler.DeleteBfdUDPAuthenticationKey(oldInput) if err != nil { return fmt.Errorf("error while removing BFD auth key with ID %d: %v", oldInput.Id, err) } - err = vppcalls.SetBfdUDPAuthenticationKey(newInput, plugin.log, plugin.vppChan, plugin.stopwatch) + err = plugin.bfdHandler.SetBfdUDPAuthenticationKey(newInput) if err != nil { return fmt.Errorf("error while setting up BFD auth key with ID %d: %v", oldInput.Id, err) } @@ -333,7 +337,7 @@ func (plugin *BFDConfigurator) ModifyBfdAuthKey(oldInput *bfd.SingleHopBFD_Key, // Recreate BFD sessions if necessary if len(sessionList) != 0 { for _, bfdSession := range sessionList { - err := vppcalls.AddBfdUDPSessionFromDetails(bfdSession, plugin.keysIndexes, plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.AddBfdUDPSessionFromDetails(bfdSession, plugin.keysIndexes) if err != nil { return err } @@ -349,7 +353,7 @@ func (plugin *BFDConfigurator) DeleteBfdAuthKey(bfdInput *bfd.SingleHopBFD_Key) plugin.log.Info("Deleting BFD auth key") // Check that this auth key is not used in any session - sessionList, err := vppcalls.DumpBfdUDPSessionsWithID(bfdInput.Id, plugin.vppChan, plugin.stopwatch) + sessionList, err := plugin.bfdHandler.DumpBfdUDPSessionsWithID(bfdInput.Id) if err != nil { return fmt.Errorf("error while verifying authentication key usage. 
Id: %v", bfdInput.Id) } @@ -359,14 +363,14 @@ func (plugin *BFDConfigurator) DeleteBfdAuthKey(bfdInput *bfd.SingleHopBFD_Key) for _, bfds := range sessionList { sourceAddr := net.IP(bfds.LocalAddr[0:4]).String() destAddr := net.IP(bfds.PeerAddr[0:4]).String() - err := vppcalls.DeleteBfdUDPSession(bfds.SwIfIndex, sourceAddr, destAddr, plugin.vppChan, nil) + err := plugin.bfdHandler.DeleteBfdUDPSession(bfds.SwIfIndex, sourceAddr, destAddr) if err != nil { return err } } plugin.log.Debugf("%v session(s) temporary removed", len(sessionList)) } - err = vppcalls.DeleteBfdUDPAuthenticationKey(bfdInput, plugin.vppChan, nil) + err = plugin.bfdHandler.DeleteBfdUDPAuthenticationKey(bfdInput) if err != nil { return fmt.Errorf("error while removing BFD auth key with ID %v", bfdInput.Id) } @@ -376,7 +380,7 @@ func (plugin *BFDConfigurator) DeleteBfdAuthKey(bfdInput *bfd.SingleHopBFD_Key) // Recreate BFD sessions if necessary if len(sessionList) != 0 { for _, bfdSession := range sessionList { - err := vppcalls.AddBfdUDPSessionFromDetails(bfdSession, plugin.keysIndexes, plugin.log, plugin.vppChan, nil) + err := plugin.bfdHandler.AddBfdUDPSessionFromDetails(bfdSession, plugin.keysIndexes) if err != nil { return err } @@ -390,7 +394,7 @@ func (plugin *BFDConfigurator) DeleteBfdAuthKey(bfdInput *bfd.SingleHopBFD_Key) func (plugin *BFDConfigurator) DumpBFDAuthKeys() ([]*bfd.SingleHopBFD_Key, error) { var bfdAuthKeyList []*bfd.SingleHopBFD_Key - keys, err := vppcalls.DumpBfdKeys(plugin.vppChan, plugin.stopwatch) + keys, err := plugin.bfdHandler.DumpBfdKeys() if err != nil { return bfdAuthKeyList, err } @@ -424,7 +428,7 @@ func (plugin *BFDConfigurator) ConfigureBfdEchoFunction(bfdInput *bfd.SingleHopB return fmt.Errorf("interface %v does not exist", bfdInput.EchoSourceInterface) } - err := vppcalls.AddBfdEchoFunction(bfdInput, plugin.ifIndexes, plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.AddBfdEchoFunction(bfdInput, plugin.ifIndexes) if err != nil { return fmt.Errorf("error while setting up BFD echo source with interface %v", bfdInput.EchoSourceInterface) } @@ -449,7 +453,7 @@ func (plugin *BFDConfigurator) ModifyBfdEchoFunction(oldInput *bfd.SingleHopBFD_ func (plugin *BFDConfigurator) DeleteBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction) error { plugin.log.Info("Deleting BFD echo function") - err := vppcalls.DeleteBfdEchoFunction(plugin.vppChan, plugin.stopwatch) + err := plugin.bfdHandler.DeleteBfdEchoFunction() if err != nil { return fmt.Errorf("error while removing BFD echo source with interface %v", bfdInput.EchoSourceInterface) } diff --git a/plugins/vpp/ifplugin/bfd_config_test.go b/plugins/vpp/ifplugin/bfd_config_test.go index d036c90fd9..9dace02759 100644 --- a/plugins/vpp/ifplugin/bfd_config_test.go +++ b/plugins/vpp/ifplugin/bfd_config_test.go @@ -23,6 +23,7 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" @@ -43,8 +44,9 @@ func TestBfdConfiguratorInit(t *testing.T) { defer connection.Disconnect() plugin := &ifplugin.BFDConfigurator{} + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, true) + nil, stopwatch) Expect(err).To(BeNil()) err = plugin.Close() @@ -575,7 +577,8 @@ func 
bfdTestSetup(t *testing.T) (*vppcallmock.TestCtx, *core.Connection, *ifplug swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "stn", nil)) // Configurator plugin := &ifplugin.BFDConfigurator{} - err = plugin.Init(log, connection, swIfIndices, false) + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) + err = plugin.Init(log, connection, swIfIndices, stopwatch) Expect(err).To(BeNil()) return ctx, connection, plugin, swIfIndices diff --git a/plugins/vpp/ifplugin/data_resync.go b/plugins/vpp/ifplugin/data_resync.go index 10f63bdb2b..34394ab932 100644 --- a/plugins/vpp/ifplugin/data_resync.go +++ b/plugins/vpp/ifplugin/data_resync.go @@ -21,8 +21,6 @@ import ( "strings" _ "github.com/ligato/vpp-agent/plugins/vpp/binapi/nat" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppdump" "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/model/nat" @@ -57,7 +55,7 @@ func (plugin *InterfaceConfigurator) Resync(nbIfs []*intf.Interfaces_Interface) plugin.afPacketConfigurator.clearMapping() // Dump current state of the VPP interfaces - vppIfs, err := vppdump.DumpInterfaces(plugin.log, plugin.vppCh, plugin.stopwatch) + vppIfs, err := plugin.ifHandler.DumpInterfaces() if err != nil { return []error{err} } @@ -190,7 +188,7 @@ func (plugin *InterfaceConfigurator) VerifyVPPConfigPresence(nbIfaces []*intf.In var stop bool // Step 0: Dump actual state of the VPP - vppIfaces, err := vppdump.DumpInterfaces(plugin.log, plugin.vppCh, plugin.stopwatch) + vppIfaces, err := plugin.ifHandler.DumpInterfaces() if err != nil { return stop } @@ -401,7 +399,7 @@ func (plugin *StnConfigurator) Resync(nbStnRules []*stn.STN_Rule) error { if !found { // The rule is attached to non existing interface but it can be removed. 
If there is a similar // rule in NB config, it will be configured (or cached) - if err := vppcalls.DelStnRule(vppStnRule.SwIfIndex, &vppStnIP, plugin.vppChan, nil); err != nil { + if err := plugin.stnHandler.DelStnRule(vppStnRule.SwIfIndex, &vppStnIP); err != nil { plugin.log.Error(err) wasErr = err } @@ -423,7 +421,7 @@ func (plugin *StnConfigurator) Resync(nbStnRules []*stn.STN_Rule) error { // If STN rule does not exist, it is obsolete if !match { - if err := vppcalls.DelStnRule(vppStnRule.SwIfIndex, &vppStnIP, plugin.vppChan, nil); err != nil { + if err := plugin.stnHandler.DelStnRule(vppStnRule.SwIfIndex, &vppStnIP); err != nil { plugin.log.Error(err) wasErr = err } @@ -454,7 +452,7 @@ func (plugin *NatConfigurator) ResyncNatGlobal(nbGlobal *nat.Nat44Global) error // Re-initialize cache plugin.clearMapping() - vppNatGlobal, err := vppdump.Nat44GlobalConfigDump(plugin.ifIndexes, plugin.log, plugin.vppChan, plugin.stopwatch) + vppNatGlobal, err := plugin.natHandler.Nat44GlobalConfigDump(plugin.ifIndexes) if err != nil { return fmt.Errorf("failed to dump NAT44 global config: %v", err) } @@ -473,7 +471,7 @@ func (plugin *NatConfigurator) ResyncSNat(sNatConf []*nat.Nat44SNat_SNatConfig) func (plugin *NatConfigurator) ResyncDNat(nbDNatConfig []*nat.Nat44DNat_DNatConfig) error { plugin.log.Debug("RESYNC DNAT config.") - vppDNatCfg, err := vppdump.NAT44DNatDump(plugin.ifIndexes, plugin.log, plugin.vppChan, plugin.stopwatch) + vppDNatCfg, err := plugin.natHandler.NAT44DNatDump(plugin.ifIndexes) if err != nil { return fmt.Errorf("failed to dump DNAT config: %v", err) } @@ -1003,7 +1001,7 @@ func (plugin *InterfaceConfigurator) isIfModified(nbIf, vppIf *intf.Interfaces_I // Register interface to mapping and add tag/index to the VPP func (plugin *InterfaceConfigurator) registerInterface(ifName string, ifIdx uint32, ifData *intf.Interfaces_Interface) error { plugin.swIfIndexes.RegisterName(ifName, ifIdx, ifData) - if err := vppcalls.SetInterfaceTag(ifName, ifIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceTag(ifName, ifIdx); err != nil { return fmt.Errorf("error while adding interface tag %s, index %d: %v", ifName, ifIdx, err) } // Add AF-packet type interface to local cache diff --git a/plugins/vpp/ifplugin/data_resync_test.go b/plugins/vpp/ifplugin/data_resync_test.go index 96f111a9f1..4e5c25acde 100644 --- a/plugins/vpp/ifplugin/data_resync_test.go +++ b/plugins/vpp/ifplugin/data_resync_test.go @@ -22,6 +22,7 @@ import ( govpp "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" bfdApi "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" @@ -178,7 +179,7 @@ func bfdConfiguratorTestInitialization(t *testing.T, mocks []*vppReplyMock) (*if logrus.NewLogRegistry()), connection, index, - false) + measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger())) Expect(err).To(BeNil()) @@ -212,7 +213,7 @@ func stnConfiguratorTestInitialization(t *testing.T, mocks []*vppReplyMock) (*if logrus.NewLogRegistry()), connection, index, - false) + measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger())) Expect(err).To(BeNil()) return plugin, connection @@ -245,7 +246,7 @@ func natConfiguratorTestInitialization(t *testing.T, mocks []*vppReplyMock) (*if logrus.NewLogRegistry()), connection, index, - false) + measure.NewStopwatch("test-stopwatch", 
logrus.DefaultLogger())) Expect(err).To(BeNil()) return plugin, index, connection diff --git a/plugins/vpp/ifplugin/interface_config.go b/plugins/vpp/ifplugin/interface_config.go index d6d5d4669e..b9a9b8decd 100644 --- a/plugins/vpp/ifplugin/interface_config.go +++ b/plugins/vpp/ifplugin/interface_config.go @@ -28,7 +28,6 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/cn-infra/utils/safeclose" @@ -36,10 +35,8 @@ import ( "github.com/ligato/vpp-agent/plugins/govppmux" "github.com/ligato/vpp-agent/plugins/vpp/binapi/dhcp" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/binapi/memif" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppdump" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) @@ -68,6 +65,9 @@ type InterfaceConfigurator struct { vppCh govppapi.Channel + // VPP API handler + ifHandler vppcalls.IfVppAPI + // Notification channels NotifChan chan govppapi.Message // to publish SwInterfaceDetails to interface_state.go DhcpChan chan govppapi.Message // channel to receive DHCP notifications @@ -75,11 +75,14 @@ type InterfaceConfigurator struct { // Init members (channels...) and start go routines func (plugin *InterfaceConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, linux interface{}, - notifChan chan govppapi.Message, defaultMtu uint32, enableStopwatch bool) (err error) { + notifChan chan govppapi.Message, defaultMtu uint32, stopwatch *measure.Stopwatch) (err error) { // Logger plugin.log = logger.NewLogger("-if-conf") plugin.log.Debug("Initializing Interface configurator") + // Stopwatch instance + plugin.stopwatch = stopwatch + // State notification channel plugin.NotifChan = notifChan @@ -90,7 +93,10 @@ func (plugin *InterfaceConfigurator) Init(logger logging.PluginLogger, goVppMux if plugin.vppCh, err = goVppMux.NewAPIChannel(); err != nil { return err } - if err := vppcalls.CheckMsgCompatibilityForInterface(plugin.log, plugin.vppCh); err != nil { + + // VPP API handler + plugin.ifHandler = vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch) + if err := plugin.ifHandler.CheckMsgCompatibilityForInterface(); err != nil { return err } @@ -99,15 +105,14 @@ func (plugin *InterfaceConfigurator) Init(logger logging.PluginLogger, goVppMux plugin.dhcpIndexes = ifaceidx.NewDHCPIndex(nametoidx.NewNameToIdx(plugin.log, "dhcp_indices", ifaceidx.IndexDHCPMetadata)) plugin.uIfaceCache = make(map[string]string) plugin.vxlanMulticastCache = make(map[string]*intf.Interfaces_Interface) - if plugin.memifScCache, err = vppdump.DumpMemifSocketDetails(plugin.log, plugin.vppCh, - measure.GetTimeLog(memif.MemifSocketFilenameDump{}, plugin.stopwatch)); err != nil { + if plugin.memifScCache, err = plugin.ifHandler.DumpMemifSocketDetails(); err != nil { return err } // Init AF-packet configurator plugin.linux = linux plugin.afPacketConfigurator = &AFPacketConfigurator{} - plugin.afPacketConfigurator.Init(plugin.log, plugin.vppCh, plugin.linux, plugin.swIfIndexes, plugin.stopwatch) + plugin.afPacketConfigurator.Init(plugin.log, plugin.ifHandler, plugin.linux, plugin.swIfIndexes) // DHCP channel plugin.DhcpChan = make(chan govppapi.Message, 1) @@ -115,9 +120,6 @@ func (plugin 
*InterfaceConfigurator) Init(logger logging.PluginLogger, goVppMux return err } - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("InterfaceConfigurator", plugin.log) - } go plugin.watchDHCPNotifications() return nil @@ -135,8 +137,7 @@ func (plugin *InterfaceConfigurator) clearMapping() error { plugin.uIfaceCache = make(map[string]string) plugin.vxlanMulticastCache = make(map[string]*intf.Interfaces_Interface) var err error - if plugin.memifScCache, err = vppdump.DumpMemifSocketDetails(plugin.log, plugin.vppCh, - measure.GetTimeLog(memif.MemifSocketFilenameDump{}, plugin.stopwatch)); err != nil { + if plugin.memifScCache, err = plugin.ifHandler.DumpMemifSocketDetails(); err != nil { return err } return nil @@ -218,23 +219,23 @@ func (plugin *InterfaceConfigurator) ConfigureVPPInterface(iface *intf.Interface switch iface.Type { case intf.InterfaceType_TAP_INTERFACE: - ifIdx, err = vppcalls.AddTapInterface(iface.Name, iface.Tap, plugin.vppCh, plugin.stopwatch) + ifIdx, err = plugin.ifHandler.AddTapInterface(iface.Name, iface.Tap) case intf.InterfaceType_MEMORY_INTERFACE: var id uint32 // Memif socket id id, err = plugin.resolveMemifSocketFilename(iface.Memif) if err != nil { return err } - ifIdx, err = vppcalls.AddMemifInterface(iface.Name, iface.Memif, id, plugin.vppCh, plugin.stopwatch) + ifIdx, err = plugin.ifHandler.AddMemifInterface(iface.Name, iface.Memif, id) case intf.InterfaceType_VXLAN_TUNNEL: // VxLAN multicast interface. Interrupt the processing if there is an error or interface was cached multicastIfIdx, cached, err := plugin.getVxLanMulticast(iface) if err != nil || cached { return err } - ifIdx, err = vppcalls.AddVxlanTunnel(iface.Name, iface.Vxlan, iface.Vrf, multicastIfIdx, plugin.vppCh, plugin.stopwatch) + ifIdx, err = plugin.ifHandler.AddVxlanTunnel(iface.Name, iface.Vxlan, iface.Vrf, multicastIfIdx) case intf.InterfaceType_SOFTWARE_LOOPBACK: - ifIdx, err = vppcalls.AddLoopbackInterface(iface.Name, plugin.vppCh, plugin.stopwatch) + ifIdx, err = plugin.ifHandler.AddLoopbackInterface(iface.Name) case intf.InterfaceType_ETHERNET_CSMACD: var exists bool if ifIdx, _, exists = plugin.swIfIndexes.LookupIdx(iface.Name); !exists { @@ -267,7 +268,7 @@ func (plugin *InterfaceConfigurator) ConfigureVPPInterface(iface *intf.Interface if iface.RxPlacementSettings != nil { // Required in order to get vpp internal name. 
Must be called from here, calling in vppcalls causes // import cycle - ifMap, err := vppdump.DumpInterfaces(logrus.DefaultLogger(), plugin.vppCh, plugin.stopwatch) + ifMap, err := plugin.ifHandler.DumpInterfaces() if err != nil { return err } @@ -275,28 +276,28 @@ func (plugin *InterfaceConfigurator) ConfigureVPPInterface(iface *intf.Interface if !ok || ifData == nil { return fmt.Errorf("set rx-placement failed, no data available for interface index %d", ifIdx) } - if err := vppcalls.SetRxPlacement(ifData.VPPInternalName, iface.RxPlacementSettings, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetRxPlacement(ifData.VPPInternalName, iface.RxPlacementSettings); err != nil { errs = append(errs, err) } } // configure optional mac address (for af packet it is configured in different way) if iface.PhysAddress != "" && iface.Type != intf.InterfaceType_AF_PACKET_INTERFACE { - if err := vppcalls.SetInterfaceMac(ifIdx, iface.PhysAddress, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceMac(ifIdx, iface.PhysAddress); err != nil { errs = append(errs, err) } } // configure optional vrf if iface.Type != intf.InterfaceType_VXLAN_TUNNEL { - if err := vppcalls.SetInterfaceVRF(ifIdx, iface.Vrf, plugin.log, plugin.vppCh); err != nil { + if err := plugin.ifHandler.SetInterfaceVRF(ifIdx, iface.Vrf); err != nil { errs = append(errs, err) } } // configure DHCP client if iface.SetDhcpClient { - if err := vppcalls.SetInterfaceAsDHCPClient(ifIdx, iface.Name, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceAsDHCPClient(ifIdx, iface.Name); err != nil { errs = append(errs, err) } else { plugin.log.Debugf("Interface %v set as DHCP client", iface.Name) @@ -314,7 +315,7 @@ func (plugin *InterfaceConfigurator) ConfigureVPPInterface(iface *intf.Interface // configure container IP address if iface.ContainerIpAddress != "" { - if err := vppcalls.AddContainerIP(ifIdx, iface.ContainerIpAddress, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.AddContainerIP(ifIdx, iface.ContainerIpAddress); err != nil { errs = append(errs, err) } else { plugin.log.WithFields(logging.Fields{"IPaddr": iface.ContainerIpAddress, "ifIdx": ifIdx}). 
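The change applied throughout interface_config.go here is mechanical: each vppcalls.Foo(args, plugin.vppCh, plugin.stopwatch) call becomes plugin.ifHandler.Foo(args), with the channel, logger and stopwatch bound once when the handler is constructed, and the former vppdump dump functions folded into the same handler. Below is a minimal, self-contained sketch of that pattern; vppChannel, fakeChannel and the message name are simplified stand-ins for illustration, not the real govppapi types.

package main

import (
	"fmt"
	"log"
	"os"
)

// vppChannel is a simplified stand-in for govppapi.Channel, just enough to
// make the sketch self-contained; the real channel is request/reply based.
type vppChannel interface {
	SendRequest(msgName string) error
}

// fakeChannel prints requests instead of talking to VPP.
type fakeChannel struct{}

func (fakeChannel) SendRequest(msgName string) error {
	fmt.Println("sent:", msgName)
	return nil
}

// ifVppHandler binds the dependencies that every vppcalls function previously
// received as trailing parameters (API channel, logger, stopwatch).
type ifVppHandler struct {
	ch  vppChannel
	log *log.Logger
}

// newIfVppHandler mirrors the role of vppcalls.NewIfVppHandler: bind the
// dependencies once, then call narrow methods many times.
func newIfVppHandler(ch vppChannel, l *log.Logger) *ifVppHandler {
	return &ifVppHandler{ch: ch, log: l}
}

// InterfaceAdminUp shows one wrapper turned into a method; the call site
// shrinks from vppcalls.InterfaceAdminUp(ifIdx, plugin.vppCh, plugin.stopwatch)
// to plugin.ifHandler.InterfaceAdminUp(ifIdx).
func (h *ifVppHandler) InterfaceAdminUp(ifIdx uint32) error {
	h.log.Printf("setting interface %d up", ifIdx)
	return h.ch.SendRequest("sw_interface_set_flags")
}

func main() {
	h := newIfVppHandler(fakeChannel{}, log.New(os.Stdout, "", 0))
	if err := h.InterfaceAdminUp(1); err != nil {
		log.Fatal(err)
	}
}
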
@@ -330,7 +331,7 @@ func (plugin *InterfaceConfigurator) ConfigureVPPInterface(iface *intf.Interface } if mtuToConfigure != 0 { iface.Mtu = mtuToConfigure - if err := vppcalls.SetInterfaceMtu(ifIdx, mtuToConfigure, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceMtu(ifIdx, mtuToConfigure); err != nil { errs = append(errs, err) } } @@ -347,7 +348,7 @@ func (plugin *InterfaceConfigurator) ConfigureVPPInterface(iface *intf.Interface // set interface up if enabled // NOTE: needs to be called after RegisterName, otherwise interface up/down notification won't map to a valid interface if iface.Enabled { - if err := vppcalls.InterfaceAdminUp(ifIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.InterfaceAdminUp(ifIdx); err != nil { l.Warnf("setting interface up failed: %v", err) errs = append(errs, err) return fmt.Errorf("found %d errors: %v", len(errs), errs) @@ -406,7 +407,7 @@ func (plugin *InterfaceConfigurator) configRxModeForInterface(iface *intf.Interf Call specific vpp API method for setting rx-mode */ func (plugin *InterfaceConfigurator) configRxMode(iface *intf.Interfaces_Interface, ifIdx uint32, rxModeSettings *intf.Interfaces_Interface_RxModeSettings) error { - err := vppcalls.SetRxMode(ifIdx, rxModeSettings, plugin.vppCh, plugin.stopwatch) + err := plugin.ifHandler.SetRxMode(ifIdx, rxModeSettings) plugin.log.WithFields(logging.Fields{"ifName": iface.Name, "rxMode": rxModeSettings.RxMode}). Debug("RX-mode configuration for ", iface.Type, ".") return err @@ -426,7 +427,7 @@ func (plugin *InterfaceConfigurator) configureIPAddresses(ifName string, ifIdx u return nil } // Set interface as un-numbered - if err := vppcalls.SetUnnumberedIP(ifIdx, ifIdxIP, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetUnnumberedIP(ifIdx, ifIdxIP); err != nil { return err } else { plugin.log.WithFields(logging.Fields{"un-numberedIface": ifIdx, "ifIdxIP": ifIdxIP}).Debug("Interface set as un-numbered") @@ -440,7 +441,7 @@ func (plugin *InterfaceConfigurator) configureIPAddresses(ifName string, ifIdx u // configure optional ip address var wasErr error for _, address := range addresses { - if err := vppcalls.AddInterfaceIP(ifIdx, address, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.AddInterfaceIP(ifIdx, address); err != nil { plugin.log.Errorf("adding interface IP address failed: %v", err) wasErr = err } @@ -456,7 +457,7 @@ func (plugin *InterfaceConfigurator) configureIPAddresses(ifName string, ifIdx u func (plugin *InterfaceConfigurator) removeIPAddresses(ifIdx uint32, addresses []*net.IPNet, unnumbered *intf.Interfaces_Interface_Unnumbered) error { if unnumbered != nil && unnumbered.IsUnnumbered { // Set interface as un-numbered - if err := vppcalls.UnsetUnnumberedIP(ifIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.UnsetUnnumberedIP(ifIdx); err != nil { return err } } @@ -464,7 +465,7 @@ func (plugin *InterfaceConfigurator) removeIPAddresses(ifIdx uint32, addresses [ // delete IP Addresses var wasErr error for _, addr := range addresses { - err := vppcalls.DelInterfaceIP(ifIdx, addr, plugin.vppCh, plugin.stopwatch) + err := plugin.ifHandler.DelInterfaceIP(ifIdx, addr) if err != nil { plugin.log.Errorf("deleting IP address failed: %v", err) wasErr = err @@ -490,7 +491,7 @@ func (plugin *InterfaceConfigurator) resolveDependentUnnumberedInterfaces(ifName delete(plugin.uIfaceCache, uIface) continue } - if err := vppcalls.SetUnnumberedIP(uIdx, ifIdxIP, 
plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetUnnumberedIP(uIdx, ifIdxIP); err != nil { plugin.log.Errorf("setting unnumbered IP failed: %v", err) wasErr = err } else { @@ -593,7 +594,7 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac if newConfig.RxPlacementSettings != nil { // Required in order to get vpp internal name. Must be called from here, calling in vppcalls causes // import cycle - ifMap, err := vppdump.DumpInterfaces(logrus.DefaultLogger(), plugin.vppCh, plugin.stopwatch) + ifMap, err := plugin.ifHandler.DumpInterfaces() if err != nil { return err } @@ -601,7 +602,7 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac if !ok || ifData == nil { return fmt.Errorf("set rx-placement for new config failed, no data available for interface index %d", ifIdx) } - if err := vppcalls.SetRxPlacement(ifData.VPPInternalName, newConfig.RxPlacementSettings, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetRxPlacement(ifData.VPPInternalName, newConfig.RxPlacementSettings); err != nil { wasError = err } } @@ -609,9 +610,9 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac // admin status if newConfig.Enabled != oldConfig.Enabled { if newConfig.Enabled { - err = vppcalls.InterfaceAdminUp(ifIdx, plugin.vppCh, nil) + err = plugin.ifHandler.InterfaceAdminUp(ifIdx) } else { - err = vppcalls.InterfaceAdminDown(ifIdx, plugin.vppCh, nil) + err = plugin.ifHandler.InterfaceAdminDown(ifIdx) } if nil != err { wasError = err @@ -620,7 +621,7 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac // configure new mac address if set (and only if it was changed) if newConfig.PhysAddress != "" && newConfig.PhysAddress != oldConfig.PhysAddress { - if err := vppcalls.SetInterfaceMac(ifIdx, newConfig.PhysAddress, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceMac(ifIdx, newConfig.PhysAddress); err != nil { plugin.log.Errorf("setting interface MAC address failed: %v", err) wasError = err } @@ -629,14 +630,14 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac // reconfigure DHCP if oldConfig.SetDhcpClient != newConfig.SetDhcpClient { if newConfig.SetDhcpClient { - if err := vppcalls.SetInterfaceAsDHCPClient(ifIdx, newConfig.Name, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceAsDHCPClient(ifIdx, newConfig.Name); err != nil { plugin.log.Error(err) wasError = err } else { plugin.log.Debugf("Interface %v set as DHCP client", newConfig.Name) } } else { - if err := vppcalls.UnsetInterfaceAsDHCPClient(ifIdx, newConfig.Name, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.UnsetInterfaceAsDHCPClient(ifIdx, newConfig.Name); err != nil { plugin.log.Error(err) wasError = err } else { @@ -669,7 +670,7 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac wasError = err } - if err := vppcalls.SetInterfaceVRF(ifIdx, newConfig.Vrf, plugin.log, plugin.vppCh); err != nil { + if err := plugin.ifHandler.SetInterfaceVRF(ifIdx, newConfig.Vrf); err != nil { plugin.log.Error(err) wasError = err } @@ -698,7 +699,7 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac if newConfig.ContainerIpAddress != oldConfig.ContainerIpAddress { plugin.log.WithFields(logging.Fields{"ifIdx": ifIdx, "ip_new": newConfig.ContainerIpAddress, "ip_old": 
oldConfig.ContainerIpAddress}). Debug("Container IP address modification.") - if err := vppcalls.AddContainerIP(ifIdx, newConfig.ContainerIpAddress, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.AddContainerIP(ifIdx, newConfig.ContainerIpAddress); err != nil { plugin.log.WithFields(logging.Fields{"newIP": newConfig.ContainerIpAddress, "oldIP": oldConfig.ContainerIpAddress, "ifIdx": ifIdx}). Errorf("adding container IP failed: %v", err) wasError = err @@ -707,11 +708,11 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac // Set MTU if changed in interface config if newConfig.Mtu != 0 && newConfig.Mtu != oldConfig.Mtu { - if err := vppcalls.SetInterfaceMtu(ifIdx, newConfig.Mtu, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceMtu(ifIdx, newConfig.Mtu); err != nil { wasError = err } } else if newConfig.Mtu == 0 && plugin.defaultMtu != 0 { - if err := vppcalls.SetInterfaceMtu(ifIdx, plugin.defaultMtu, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.SetInterfaceMtu(ifIdx, plugin.defaultMtu); err != nil { wasError = err } } @@ -759,7 +760,7 @@ func (plugin *InterfaceConfigurator) modifyRxModeForInterfaces(oldIntf *intf.Int Direct call of vpp api to change rx-mode of specified interface */ func (plugin *InterfaceConfigurator) modifyRxMode(ifName string, ifIdx uint32, rxMode *intf.Interfaces_Interface_RxModeSettings) error { - err := vppcalls.SetRxMode(ifIdx, rxMode, plugin.vppCh, plugin.stopwatch) + err := plugin.ifHandler.SetRxMode(ifIdx, rxMode) plugin.log.Debugf("RX-mode for %s set to %v", ifName, rxMode.RxMode) return err } @@ -840,7 +841,7 @@ func (plugin *InterfaceConfigurator) deleteVPPInterface(oldConfig *intf.Interfac if oldConfig.Type != intf.InterfaceType_AF_PACKET_INTERFACE { plugin.log.Infof("Setting interface %v down", oldConfig.Name) // Let's try to do following even if previously error occurred - if err := vppcalls.InterfaceAdminDown(ifIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.InterfaceAdminDown(ifIdx); err != nil { plugin.log.Errorf("Setting interface down failed: %v", err) wasError = err } @@ -848,7 +849,7 @@ func (plugin *InterfaceConfigurator) deleteVPPInterface(oldConfig *intf.Interfac // Remove DHCP if it was set if oldConfig.SetDhcpClient { - if err := vppcalls.UnsetInterfaceAsDHCPClient(ifIdx, oldConfig.Name, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.UnsetInterfaceAsDHCPClient(ifIdx, oldConfig.Name); err != nil { plugin.log.Error(err) wasError = err } @@ -859,7 +860,7 @@ func (plugin *InterfaceConfigurator) deleteVPPInterface(oldConfig *intf.Interfac // let's try to do following even if previously error occurred if oldConfig.ContainerIpAddress != "" { - if err := vppcalls.DelContainerIP(ifIdx, oldConfig.ContainerIpAddress, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.DelContainerIP(ifIdx, oldConfig.ContainerIpAddress); err != nil { plugin.log.Error(err) wasError = err } else { @@ -881,7 +882,7 @@ func (plugin *InterfaceConfigurator) deleteVPPInterface(oldConfig *intf.Interfac return err } for _, oldAddr := range oldAddrs { - if err := vppcalls.DelInterfaceIP(ifIdx, oldAddr, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.DelInterfaceIP(ifIdx, oldAddr); err != nil { plugin.log.Errorf("deleting interface IP address failed: %v", err) wasError = err } @@ -892,13 +893,13 @@ func (plugin *InterfaceConfigurator) 
deleteVPPInterface(oldConfig *intf.Interfac // let's try to do following even if previously error occurred switch oldConfig.Type { case intf.InterfaceType_TAP_INTERFACE: - err = vppcalls.DeleteTapInterface(oldConfig.Name, ifIdx, oldConfig.Tap.Version, plugin.vppCh, plugin.stopwatch) + err = plugin.ifHandler.DeleteTapInterface(oldConfig.Name, ifIdx, oldConfig.Tap.Version) case intf.InterfaceType_MEMORY_INTERFACE: - err = vppcalls.DeleteMemifInterface(oldConfig.Name, ifIdx, plugin.vppCh, plugin.stopwatch) + err = plugin.ifHandler.DeleteMemifInterface(oldConfig.Name, ifIdx) case intf.InterfaceType_VXLAN_TUNNEL: - err = vppcalls.DeleteVxlanTunnel(oldConfig.Name, ifIdx, oldConfig.GetVxlan(), plugin.vppCh, plugin.stopwatch) + err = plugin.ifHandler.DeleteVxlanTunnel(oldConfig.Name, ifIdx, oldConfig.GetVxlan()) case intf.InterfaceType_SOFTWARE_LOOPBACK: - err = vppcalls.DeleteLoopbackInterface(oldConfig.Name, ifIdx, plugin.vppCh, plugin.stopwatch) + err = plugin.ifHandler.DeleteLoopbackInterface(oldConfig.Name, ifIdx) case intf.InterfaceType_ETHERNET_CSMACD: plugin.log.Debugf("Interface removal skipped: cannot remove (blacklist) physical interface") // Not an error return nil @@ -944,7 +945,7 @@ func (plugin *InterfaceConfigurator) resolveMemifSocketFilename(memifIf *intf.In if !ok { // Register new socket. ID is generated (default filename ID is 0, first is ID 1, second ID 2, etc) registeredID = uint32(len(plugin.memifScCache)) - err := vppcalls.RegisterMemifSocketFilename([]byte(memifIf.SocketFilename), registeredID, plugin.vppCh, plugin.stopwatch) + err := plugin.ifHandler.RegisterMemifSocketFilename([]byte(memifIf.SocketFilename), registeredID) if err != nil { return 0, fmt.Errorf("error registering socket file name %s (ID %d): %v", memifIf.SocketFilename, registeredID, err) } diff --git a/plugins/vpp/ifplugin/interface_config_test.go b/plugins/vpp/ifplugin/interface_config_test.go index b7d44f8bfe..76d3d3bf54 100644 --- a/plugins/vpp/ifplugin/interface_config_test.go +++ b/plugins/vpp/ifplugin/interface_config_test.go @@ -24,6 +24,7 @@ import ( govpp "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" dhcp_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/dhcp" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" @@ -60,8 +61,9 @@ func TestInterfaceConfiguratorInit(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) // Test init + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err = plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, ifVppNotifChan, 0, false) + nil, ifVppNotifChan, 0, stopwatch) Expect(err).To(BeNil()) Expect(plugin.IsSocketFilenameCached("test-socket-filename")).To(BeTrue()) // Test close @@ -1433,7 +1435,8 @@ func ifTestSetup(t *testing.T) (*vppcallmock.TestCtx, *govpp.Connection, *ifplug // Configurator plugin := &ifplugin.InterfaceConfigurator{} notifChan := make(chan govppapi.Message, 5) - err = plugin.Init(log, connection, 1, notifChan, 1500, false) + stopwatch := measure.NewStopwatch("test-stopwatch", log) + err = plugin.Init(log, connection, 1, notifChan, 1500, stopwatch) Expect(err).To(BeNil()) return ctx, connection, plugin diff --git a/plugins/vpp/ifplugin/nat_config.go b/plugins/vpp/ifplugin/nat_config.go index 8d1705e010..984cf4843f 100644 --- a/plugins/vpp/ifplugin/nat_config.go +++ 
b/plugins/vpp/ifplugin/nat_config.go @@ -32,7 +32,6 @@ import ( "github.com/ligato/vpp-agent/plugins/govppmux" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppdump" "github.com/ligato/vpp-agent/plugins/vpp/model/nat" ) @@ -79,16 +78,22 @@ type NatConfigurator struct { vppChan govppapi.Channel vppDumpChan govppapi.Channel + // VPP API handler + natHandler vppcalls.NatVppAPI + stopwatch *measure.Stopwatch } // Init NAT configurator func (plugin *NatConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, ifIndexes ifaceidx.SwIfIndex, - enableStopwatch bool) (err error) { + stopwatch *measure.Stopwatch) (err error) { // Logger plugin.log = logger.NewLogger("-nat-conf") plugin.log.Debug("Initializing NAT configurator") + // Stopwatch + plugin.stopwatch = stopwatch + // Mappings plugin.ifIndexes = ifIndexes plugin.sNatIndexes = nametoidx.NewNameToIdx(plugin.log, "snat-indices", nil) @@ -108,13 +113,9 @@ func (plugin *NatConfigurator) Init(logger logging.PluginLogger, goVppMux govppm return err } - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("InterfaceConfigurator", plugin.log) - } - - // Check VPP message compatibility - if err := vppcalls.CheckMsgCompatibilityForNat(plugin.vppChan); err != nil { + // VPP API handler + plugin.natHandler = vppcalls.NewNatVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.log, plugin.stopwatch) + if err := plugin.natHandler.CheckMsgCompatibilityForNat(); err != nil { return err } @@ -180,7 +181,7 @@ func (plugin *NatConfigurator) SetNatGlobalConfig(config *nat.Nat44Global) error plugin.globalNAT = config // Forwarding - if err := vppcalls.SetNat44Forwarding(config.Forwarding, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.natHandler.SetNat44Forwarding(config.Forwarding); err != nil { return err } if config.Forwarding { @@ -216,7 +217,7 @@ func (plugin *NatConfigurator) ModifyNatGlobalConfig(oldConfig, newConfig *nat.N // Forwarding if oldConfig.Forwarding != newConfig.Forwarding { - if err := vppcalls.SetNat44Forwarding(newConfig.Forwarding, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.natHandler.SetNat44Forwarding(newConfig.Forwarding); err != nil { return err } } @@ -440,12 +441,12 @@ func (plugin *NatConfigurator) ResolveDeletedInterface(ifName string, ifIdx uint // DumpNatGlobal returns the current NAT44 global config func (plugin *NatConfigurator) DumpNatGlobal() (*nat.Nat44Global, error) { - return vppdump.Nat44GlobalConfigDump(plugin.ifIndexes, plugin.log, plugin.vppDumpChan, plugin.stopwatch) + return plugin.natHandler.Nat44GlobalConfigDump(plugin.ifIndexes) } // DumpNatDNat returns the current NAT44 DNAT config func (plugin *NatConfigurator) DumpNatDNat() (*nat.Nat44DNat, error) { - return vppdump.NAT44DNatDump(plugin.ifIndexes, plugin.log, plugin.vppDumpChan, plugin.stopwatch) + return plugin.natHandler.NAT44DNatDump(plugin.ifIndexes) } // enables set of interfaces as inside/outside in NAT @@ -458,7 +459,7 @@ func (plugin *NatConfigurator) enableNatInterfaces(natInterfaces []*nat.Nat44Glo } else { if natInterface.OutputFeature { // enable nat interface and output feature - if err = vppcalls.EnableNat44InterfaceOutput(ifIdx, natInterface.IsInside, plugin.vppChan, plugin.stopwatch); err != nil { + if err = plugin.natHandler.EnableNat44InterfaceOutput(ifIdx, natInterface.IsInside); err != nil { return } if natInterface.IsInside { @@ -468,7 
+469,7 @@ func (plugin *NatConfigurator) enableNatInterfaces(natInterfaces []*nat.Nat44Glo } } else { // enable interface only - if err = vppcalls.EnableNat44Interface(ifIdx, natInterface.IsInside, plugin.vppChan, plugin.stopwatch); err != nil { + if err = plugin.natHandler.EnableNat44Interface(ifIdx, natInterface.IsInside); err != nil { return } if natInterface.IsInside { @@ -500,7 +501,7 @@ func (plugin *NatConfigurator) disableNatInterfaces(natInterfaces []*nat.Nat44Gl } else { if natInterface.OutputFeature { // disable nat interface and output feature - if err = vppcalls.DisableNat44InterfaceOutput(ifIdx, natInterface.IsInside, plugin.vppChan, plugin.stopwatch); err != nil { + if err = plugin.natHandler.DisableNat44InterfaceOutput(ifIdx, natInterface.IsInside); err != nil { return } if natInterface.IsInside { @@ -510,7 +511,7 @@ func (plugin *NatConfigurator) disableNatInterfaces(natInterfaces []*nat.Nat44Gl } } else { // disable interface - if err = vppcalls.DisableNat44Interface(ifIdx, natInterface.IsInside, plugin.vppChan, plugin.stopwatch); err != nil { + if err = plugin.natHandler.DisableNat44Interface(ifIdx, natInterface.IsInside); err != nil { return } if natInterface.IsInside { @@ -561,7 +562,7 @@ func (plugin *NatConfigurator) addAddressPool(addressPools []*nat.Nat44Global_Ad } else if lastIP == nil { lastIP = firstIP } - if err = vppcalls.AddNat44AddressPool(firstIP, lastIP, addressPool.VrfId, addressPool.TwiceNat, plugin.vppChan, plugin.stopwatch); err != nil { + if err = plugin.natHandler.AddNat44AddressPool(firstIP, lastIP, addressPool.VrfId, addressPool.TwiceNat); err != nil { plugin.log.Error(err) wasErr = err } @@ -607,7 +608,7 @@ func (plugin *NatConfigurator) delAddressPool(addressPools []*nat.Nat44Global_Ad } // remove address pool - if err = vppcalls.DelNat44AddressPool(firstIP, lastIP, addressPool.VrfId, addressPool.TwiceNat, plugin.vppChan, plugin.stopwatch); err != nil { + if err = plugin.natHandler.DelNat44AddressPool(firstIP, lastIP, addressPool.VrfId, addressPool.TwiceNat); err != nil { plugin.log.Error(err) wasErr = err } @@ -721,9 +722,9 @@ func (plugin *NatConfigurator) handleStaticMappingLb(staticMappingLb *nat.Nat44D } if add { - return vppcalls.AddNat44StaticMappingLb(ctx, plugin.vppChan, plugin.stopwatch) + return plugin.natHandler.AddNat44StaticMappingLb(ctx) } - return vppcalls.DelNat44StaticMappingLb(ctx, plugin.vppChan, plugin.stopwatch) + return plugin.natHandler.DelNat44StaticMappingLb(ctx) } // handler for single static mapping entry @@ -781,9 +782,9 @@ func (plugin *NatConfigurator) handleStaticMapping(staticMapping *nat.Nat44DNat_ } if add { - return vppcalls.AddNat44StaticMapping(ctx, plugin.vppChan, plugin.stopwatch) + return plugin.natHandler.AddNat44StaticMapping(ctx) } - return vppcalls.DelNat44StaticMapping(ctx, plugin.vppChan, plugin.stopwatch) + return plugin.natHandler.DelNat44StaticMapping(ctx) } // configures a list of identity mappings with label @@ -874,9 +875,9 @@ func (plugin *NatConfigurator) handleIdentityMapping(idMapping *nat.Nat44DNat_DN // Configure/remove identity mapping if isAdd { - return vppcalls.AddNat44IdentityMapping(ctx, plugin.vppChan, plugin.stopwatch) + return plugin.natHandler.AddNat44IdentityMapping(ctx) } - return vppcalls.DelNat44IdentityMapping(ctx, plugin.vppChan, plugin.stopwatch) + return plugin.natHandler.DelNat44IdentityMapping(ctx) } // looks for new and obsolete IN interfaces diff --git a/plugins/vpp/ifplugin/nat_config_test.go b/plugins/vpp/ifplugin/nat_config_test.go index 9324c55291..54e55a2f33 
100644 --- a/plugins/vpp/ifplugin/nat_config_test.go +++ b/plugins/vpp/ifplugin/nat_config_test.go @@ -21,6 +21,7 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" nat_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/nat" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" @@ -39,8 +40,9 @@ func TestNatConfiguratorInit(t *testing.T) { defer connection.Disconnect() plugin := &ifplugin.NatConfigurator{} + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, true) + nil, stopwatch) Expect(err).To(BeNil()) err = plugin.Close() @@ -1223,7 +1225,8 @@ func natTestSetup(t *testing.T) (*vppcallmock.TestCtx, *core.Connection, *ifplug swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "nat", nil)) // Configurator plugin := &ifplugin.NatConfigurator{} - err = plugin.Init(log, connection, swIfIndices, false) + stopwatch := measure.NewStopwatch("test-stopwatch", log) + err = plugin.Init(log, connection, swIfIndices, stopwatch) Expect(err).To(BeNil()) return ctx, connection, plugin, swIfIndices diff --git a/plugins/vpp/ifplugin/stn_config.go b/plugins/vpp/ifplugin/stn_config.go index 803f877b9d..c641929d77 100644 --- a/plugins/vpp/ifplugin/stn_config.go +++ b/plugins/vpp/ifplugin/stn_config.go @@ -31,7 +31,6 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppdump" modelStn "github.com/ligato/vpp-agent/plugins/vpp/model/stn" ) @@ -47,7 +46,9 @@ type StnConfigurator struct { unstoredIndexes idxvpp.NameToIdxRW unstoredIndexSeq uint32 // VPP - vppChan govppapi.Channel + vppChan govppapi.VPPChannel + // VPP API handler + stnHandler vppcalls.StnVppAPI // Stopwatch stopwatch *measure.Stopwatch } @@ -66,11 +67,14 @@ func (plugin *StnConfigurator) UnstoredIndexExistsFor(name string) bool { // Init initializes STN configurator func (plugin *StnConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, ifIndexes ifaceidx.SwIfIndex, - enableStopwatch bool) (err error) { + stopwatch *measure.Stopwatch) (err error) { // Init logger plugin.log = logger.NewLogger("-stn-conf") plugin.log.Debug("Initializing STN configurator") + // Stopwatch + plugin.stopwatch = stopwatch + // Init VPP API channel plugin.vppChan, err = goVppMux.NewAPIChannel() if err != nil { @@ -83,13 +87,9 @@ func (plugin *StnConfigurator) Init(logger logging.PluginLogger, goVppMux govppm plugin.unstoredIndexes = nametoidx.NewNameToIdx(plugin.log, "stn-unstored-indexes", nil) plugin.allIndexesSeq, plugin.unstoredIndexSeq = 1, 1 - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("stnConfigurator", plugin.log) - } - - // Check VPP message compatibility - if err := vppcalls.CheckMsgCompatibilityForStn(plugin.vppChan); err != nil { + // VPP API handler + plugin.stnHandler = vppcalls.NewStnVppHandler(plugin.vppChan, plugin.stopwatch) + if err := plugin.stnHandler.CheckMsgCompatibilityForStn(); err != nil { return err } @@ -137,7 +137,7 @@ func (plugin *StnConfigurator) Add(rule *modelStn.STN_Rule) error { } else { plugin.log.Debugf("adding STN rule: %+v", rule) // Create and register new stn - if err := vppcalls.AddStnRule(stnRule.IfaceIdx, 
&stnRule.IPAddress, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.stnHandler.AddStnRule(stnRule.IfaceIdx, &stnRule.IPAddress); err != nil { return err } plugin.indexSTNRule(rule, false) @@ -165,7 +165,7 @@ func (plugin *StnConfigurator) Delete(rule *modelStn.STN_Rule) error { plugin.log.Debugf("STN rule: %+v was stored in VPP, trying to delete it. %+v", stnRule) // Remove rule - if err := vppcalls.DelStnRule(stnRule.IfaceIdx, &stnRule.IPAddress, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.stnHandler.DelStnRule(stnRule.IfaceIdx, &stnRule.IPAddress); err != nil { return err } @@ -201,7 +201,7 @@ func (plugin *StnConfigurator) Modify(ruleOld *modelStn.STN_Rule, ruleNew *model // Dump STN rules configured on the VPP func (plugin *StnConfigurator) Dump() ([]*stn.StnRulesDetails, error) { - rules, err := vppdump.DumpStnRules(plugin.vppChan, plugin.stopwatch) + rules, err := plugin.stnHandler.DumpStnRules() if err != nil { return nil, err } diff --git a/plugins/vpp/ifplugin/stn_config_test.go b/plugins/vpp/ifplugin/stn_config_test.go index 265dfa9dcc..909ca35a98 100644 --- a/plugins/vpp/ifplugin/stn_config_test.go +++ b/plugins/vpp/ifplugin/stn_config_test.go @@ -22,6 +22,7 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" stn_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" @@ -41,8 +42,9 @@ func TestStnConfiguratorInit(t *testing.T) { defer connection.Disconnect() plugin := &ifplugin.StnConfigurator{} + stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, true) + nil, stopwatch) Expect(err).To(BeNil()) err = plugin.Close() @@ -421,7 +423,8 @@ func stnTestSetup(t *testing.T) (*vppcallmock.TestCtx, *core.Connection, *ifplug swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "stn", nil)) // Configurator plugin := &ifplugin.StnConfigurator{} - err = plugin.Init(log, connection, swIfIndices, false) + stopwatch := measure.NewStopwatch("test-stopwatch", log) + err = plugin.Init(log, connection, swIfIndices, stopwatch) Expect(err).To(BeNil()) return ctx, connection, plugin, swIfIndices diff --git a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go index 869db0162d..3c7ccc6ea8 100644 --- a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go @@ -23,9 +23,25 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" ) -func interfaceSetFlags(ifIdx uint32, adminUp bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) InterfaceAdminDown(ifIdx uint32) error { + return handler.interfaceSetFlags(ifIdx, false) +} + +func (handler *ifVppHandler) InterfaceAdminUp(ifIdx uint32) error { + return handler.interfaceSetFlags(ifIdx, true) +} + +func (handler *ifVppHandler) SetInterfaceTag(tag string, ifIdx uint32) error { + return handler.handleInterfaceTag(tag, ifIdx, true) +} + +func (handler *ifVppHandler) RemoveInterfaceTag(tag string, ifIdx uint32) error { + return handler.handleInterfaceTag(tag, ifIdx, false) +} + +func (handler *ifVppHandler) interfaceSetFlags(ifIdx uint32, adminUp bool) error { defer func(t time.Time) { - 
stopwatch.TimeLog(interfaces.SwInterfaceSetFlagsReply{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.SwInterfaceSetFlagsReply{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &interfaces.SwInterfaceSetFlags{ @@ -38,7 +54,7 @@ func interfaceSetFlags(ifIdx uint32, adminUp bool, vppChan govppapi.Channel, sto } reply := &interfaces.SwInterfaceSetFlagsReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -48,19 +64,9 @@ func interfaceSetFlags(ifIdx uint32, adminUp bool, vppChan govppapi.Channel, sto return nil } -// InterfaceAdminDown calls binary API SwInterfaceSetFlagsReply with AdminUpDown=0. -func InterfaceAdminDown(ifIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return interfaceSetFlags(ifIdx, false, vppChan, stopwatch) -} - -// InterfaceAdminUp calls binary API SwInterfaceSetFlagsReply with AdminUpDown=1. -func InterfaceAdminUp(ifIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return interfaceSetFlags(ifIdx, true, vppChan, stopwatch) -} - -func handleInterfaceTag(tag string, ifIdx uint32, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) handleInterfaceTag(tag string, ifIdx uint32, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.SwInterfaceTagAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.SwInterfaceTagAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &interfaces.SwInterfaceTagAddDel{ @@ -74,7 +80,7 @@ func handleInterfaceTag(tag string, ifIdx uint32, isAdd bool, vppChan govppapi.C } reply := &interfaces.SwInterfaceTagAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -83,20 +89,3 @@ func handleInterfaceTag(tag string, ifIdx uint32, isAdd bool, vppChan govppapi.C return nil } - -// SetInterfaceTag registers new interface index/tag pair -func SetInterfaceTag(tag string, ifIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleInterfaceTag(tag, ifIdx, true, vppChan, stopwatch) -} - -// RemoveInterfaceTag un-registers new interface index/tag pair -func RemoveInterfaceTag(tag string, ifIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleInterfaceTag(tag, ifIdx, false, vppChan, stopwatch) -} - -func boolToUint(value bool) uint8 { - if value { - return 1 - } - return 0 -} diff --git a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go index 4916d5b3e6..2c2fb1ab9b 100644 --- a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go @@ -17,6 +17,8 @@ package vppcalls_test import ( "testing" + "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" @@ -24,11 +26,11 @@ import ( ) func TestInterfaceAdminDown(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetFlagsReply{}) - err := 
vppcalls.InterfaceAdminDown(1, ctx.MockChannel, nil) + err := ifHandler.InterfaceAdminDown(1) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceSetFlags) @@ -39,33 +41,33 @@ func TestInterfaceAdminDown(t *testing.T) { } func TestInterfaceAdminDownError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.HwInterfaceSetMtuReply{}) - err := vppcalls.InterfaceAdminDown(1, ctx.MockChannel, nil) + err := ifHandler.InterfaceAdminDown(1) Expect(err).ToNot(BeNil()) } func TestInterfaceAdminDownRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetFlagsReply{ Retval: 1, }) - err := vppcalls.InterfaceAdminDown(1, ctx.MockChannel, nil) + err := ifHandler.InterfaceAdminDown(1) Expect(err).ToNot(BeNil()) } func TestInterfaceAdminUp(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetFlagsReply{}) - err := vppcalls.InterfaceAdminUp(1, ctx.MockChannel, nil) + err := ifHandler.InterfaceAdminUp(1) Expect(err).ShouldNot(HaveOccurred()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceSetFlags) @@ -76,33 +78,33 @@ func TestInterfaceAdminUp(t *testing.T) { } func TestInterfaceAdminUpError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.HwInterfaceSetMtuReply{}) - err := vppcalls.InterfaceAdminDown(1, ctx.MockChannel, nil) + err := ifHandler.InterfaceAdminDown(1) Expect(err).ToNot(BeNil()) } func TestInterfaceAdminUpRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetFlagsReply{ Retval: 1, }) - err := vppcalls.InterfaceAdminDown(1, ctx.MockChannel, nil) + err := ifHandler.InterfaceAdminDown(1) Expect(err).ToNot(BeNil()) } func TestInterfaceSetTag(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.SetInterfaceTag("tag", 1, ctx.MockChannel, nil) + err := ifHandler.SetInterfaceTag("tag", 1) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceTagAddDel) @@ -113,33 +115,33 @@ func TestInterfaceSetTag(t *testing.T) { } func TestInterfaceSetTagError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.HwInterfaceSetMtuReply{}) - err := vppcalls.SetInterfaceTag("tag", 1, ctx.MockChannel, nil) + err := ifHandler.SetInterfaceTag("tag", 1) Expect(err).ToNot(BeNil()) } func TestInterfaceSetTagRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{ Retval: 1, }) - err := vppcalls.SetInterfaceTag("tag", 1, ctx.MockChannel, nil) + err := ifHandler.SetInterfaceTag("tag", 1) Expect(err).ToNot(BeNil()) } func TestInterfaceRemoveTag(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.RemoveInterfaceTag("tag", 1, 
ctx.MockChannel, nil) + err := ifHandler.RemoveInterfaceTag("tag", 1) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceTagAddDel) @@ -150,23 +152,30 @@ func TestInterfaceRemoveTag(t *testing.T) { } func TestInterfaceRemoveTagError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.HwInterfaceSetMtuReply{}) - err := vppcalls.RemoveInterfaceTag("tag", 1, ctx.MockChannel, nil) + err := ifHandler.RemoveInterfaceTag("tag", 1) Expect(err).ToNot(BeNil()) } func TestInterfaceRemoveTagRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{ Retval: 1, }) - err := vppcalls.RemoveInterfaceTag("tag", 1, ctx.MockChannel, nil) + err := ifHandler.RemoveInterfaceTag("tag", 1) Expect(err).ToNot(BeNil()) } + +func ifTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.IfVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + return ctx, ifHandler +} diff --git a/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go index cee07acf24..d190983d49 100644 --- a/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go @@ -19,16 +19,14 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" + "net" "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "net" ) -// AddAfPacketInterface calls AfPacketCreate VPP binary API. -func AddAfPacketInterface(ifName string, hwAddr string, afPacketIntf *intf.Interfaces_Interface_Afpacket, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (swIndex uint32, err error) { +func (handler *ifVppHandler) AddAfPacketInterface(ifName string, hwAddr string, afPacketIntf *intf.Interfaces_Interface_Afpacket) (swIndex uint32, err error) { defer func(t time.Time) { - stopwatch.TimeLog(af_packet.AfPacketCreate{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(af_packet.AfPacketCreate{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &af_packet.AfPacketCreate{ @@ -48,20 +46,19 @@ func AddAfPacketInterface(ifName string, hwAddr string, afPacketIntf *intf.Inter } reply := &af_packet.AfPacketCreateReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, err } if reply.Retval != 0 { return 0, fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - return reply.SwIfIndex, SetInterfaceTag(ifName, reply.SwIfIndex, vppChan, stopwatch) + return reply.SwIfIndex, handler.SetInterfaceTag(ifName, reply.SwIfIndex) } -// DeleteAfPacketInterface calls AfPacketDelete VPP binary API. 
-func DeleteAfPacketInterface(ifName string, idx uint32, afPacketIntf *intf.Interfaces_Interface_Afpacket, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) DeleteAfPacketInterface(ifName string, idx uint32, afPacketIntf *intf.Interfaces_Interface_Afpacket) error { defer func(t time.Time) { - stopwatch.TimeLog(af_packet.AfPacketDelete{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(af_packet.AfPacketDelete{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &af_packet.AfPacketDelete{ @@ -69,12 +66,12 @@ func DeleteAfPacketInterface(ifName string, idx uint32, afPacketIntf *intf.Inter } reply := &af_packet.AfPacketDeleteReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - return RemoveInterfaceTag(ifName, idx, vppChan, stopwatch) + return handler.RemoveInterfaceTag(ifName, idx) } diff --git a/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls_test.go index 4820751042..87d57218f2 100644 --- a/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls_test.go @@ -20,22 +20,20 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" if_api "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestAddAfPacketInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&af_packet.AfPacketCreateReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - ifIndex, err := vppcalls.AddAfPacketInterface("if1", "", &if_api.Interfaces_Interface_Afpacket{ + ifIndex, err := ifHandler.AddAfPacketInterface("if1", "", &if_api.Interfaces_Interface_Afpacket{ HostIfName: "host1", - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) Expect(ifIndex).ToNot(BeNil()) @@ -54,20 +52,20 @@ func TestAddAfPacketInterface(t *testing.T) { } func TestAddAfPacketInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&af_packet.AfPacketDeleteReply{}) - _, err := vppcalls.AddAfPacketInterface("if1", "", &if_api.Interfaces_Interface_Afpacket{ + _, err := ifHandler.AddAfPacketInterface("if1", "", &if_api.Interfaces_Interface_Afpacket{ HostIfName: "host1", - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestAddAfPacketInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&af_packet.AfPacketCreateReply{ @@ -75,23 +73,23 @@ func TestAddAfPacketInterfaceRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := vppcalls.AddAfPacketInterface("if1", "", &if_api.Interfaces_Interface_Afpacket{ + _, err := ifHandler.AddAfPacketInterface("if1", "", &if_api.Interfaces_Interface_Afpacket{ HostIfName: "host1", - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDeleteAfPacketInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) 
defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&af_packet.AfPacketDeleteReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteAfPacketInterface("if1", 0, &if_api.Interfaces_Interface_Afpacket{ + err := ifHandler.DeleteAfPacketInterface("if1", 0, &if_api.Interfaces_Interface_Afpacket{ HostIfName: "host1", - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) Expect(len(ctx.MockChannel.Msgs)).To(BeEquivalentTo(2)) @@ -107,20 +105,20 @@ func TestDeleteAfPacketInterface(t *testing.T) { } func TestDeleteAfPacketInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&af_packet.AfPacketCreateReply{}) - err := vppcalls.DeleteAfPacketInterface("if1", 0, &if_api.Interfaces_Interface_Afpacket{ + err := ifHandler.DeleteAfPacketInterface("if1", 0, &if_api.Interfaces_Interface_Afpacket{ HostIfName: "host1", - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDeleteAfPacketInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&af_packet.AfPacketDeleteReply{ @@ -128,23 +126,23 @@ func TestDeleteAfPacketInterfaceRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteAfPacketInterface("if1", 0, &if_api.Interfaces_Interface_Afpacket{ + err := ifHandler.DeleteAfPacketInterface("if1", 0, &if_api.Interfaces_Interface_Afpacket{ HostIfName: "host1", - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestAddAfPacketInterfaceMac(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&af_packet.AfPacketCreateReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - ifIndex, err := vppcalls.AddAfPacketInterface("if1", "a2:01:01:01:01:01", &if_api.Interfaces_Interface_Afpacket{ + ifIndex, err := ifHandler.AddAfPacketInterface("if1", "a2:01:01:01:01:01", &if_api.Interfaces_Interface_Afpacket{ HostIfName: "host1", - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) Expect(ifIndex).ToNot(BeNil()) diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go new file mode 100644 index 0000000000..46799d9163 --- /dev/null +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -0,0 +1,247 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
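[Note: the new api_vppcalls.go file that follows gathers the former package-level vppcalls functions behind per-area handler interfaces, so the channel, logger, and stopwatch are injected once at construction time instead of being threaded through every call. A minimal usage sketch under assumed names — "ch" stands for an open govpp API channel and is not defined in this patch:

	// Construct the interface handler once, then call its methods.
	log := logrus.NewLogger("if-demo") // hypothetical logger name
	ifHandler := vppcalls.NewIfVppHandler(ch, log, measure.NewStopwatch("if-demo", log))
	if err := ifHandler.InterfaceAdminUp(1); err != nil {
		log.Error(err)
	}
]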
+
+package vppcalls
+
+import (
+	"net"
+
+	"github.com/ligato/cn-infra/logging"
+	"github.com/ligato/cn-infra/logging/measure"
+	"github.com/ligato/vpp-agent/idxvpp"
+	bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd"
+	"github.com/ligato/vpp-agent/plugins/vpp/binapi/stn"
+	"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx"
+	"github.com/ligato/vpp-agent/plugins/vpp/model/bfd"
+	"github.com/ligato/vpp-agent/plugins/vpp/model/interfaces"
+	"github.com/ligato/vpp-agent/plugins/vpp/model/nat"
+)
+
+// IfVppAPI provides methods for creating and managing VPP interfaces
+type IfVppAPI interface {
+	// AddAfPacketInterface calls AfPacketCreate VPP binary API.
+	AddAfPacketInterface(ifName string, hwAddr string, afPacketIntf *interfaces.Interfaces_Interface_Afpacket) (swIndex uint32, err error)
+	// DeleteAfPacketInterface calls AfPacketDelete VPP binary API.
+	DeleteAfPacketInterface(ifName string, idx uint32, afPacketIntf *interfaces.Interfaces_Interface_Afpacket) error
+	// AddLoopbackInterface calls CreateLoopback bin API.
+	AddLoopbackInterface(ifName string) (swIndex uint32, err error)
+	// DeleteLoopbackInterface calls DeleteLoopback bin API.
+	DeleteLoopbackInterface(ifName string, idx uint32) error
+	// AddMemifInterface calls MemifCreate bin API.
+	AddMemifInterface(ifName string, memIface *interfaces.Interfaces_Interface_Memif, socketID uint32) (swIdx uint32, err error)
+	// DeleteMemifInterface calls MemifDelete bin API.
+	DeleteMemifInterface(ifName string, idx uint32) error
+	// AddTapInterface calls TapConnect bin API.
+	AddTapInterface(ifName string, tapIf *interfaces.Interfaces_Interface_Tap) (swIfIdx uint32, err error)
+	// DeleteTapInterface calls TapDelete bin API.
+	DeleteTapInterface(ifName string, idx uint32, version uint32) error
+	// AddVxlanTunnel calls AddDelVxlanTunnelReq with flag add=1.
+	AddVxlanTunnel(ifName string, vxlanIntf *interfaces.Interfaces_Interface_Vxlan, encapVrf, multicastIf uint32) (swIndex uint32, err error)
+	// DeleteVxlanTunnel calls AddDelVxlanTunnelReq with flag add=0.
+	DeleteVxlanTunnel(ifName string, idx uint32, vxlanIntf *interfaces.Interfaces_Interface_Vxlan) error
+	// DumpInterfaces dumps VPP interface data into the northbound API data structure
+	// map indexed by software interface index.
+	//
+	// LIMITATIONS:
+	// - there is no af_packet dump binary API. We rely on naming conventions of the internal VPP interface names
+	// - ip.IPAddressDetails has a wrong internal structure; as a workaround we need to handle them as notifications
+	DumpInterfaces() (map[uint32]*Interface, error)
+	// InterfaceAdminDown calls binary API SwInterfaceSetFlagsReply with AdminUpDown=0.
+	InterfaceAdminDown(ifIdx uint32) error
+	// InterfaceAdminUp calls binary API SwInterfaceSetFlagsReply with AdminUpDown=1.
+	InterfaceAdminUp(ifIdx uint32) error
+	// SetInterfaceTag registers a new interface index/tag pair
+	SetInterfaceTag(tag string, ifIdx uint32) error
+	// RemoveInterfaceTag un-registers an existing interface index/tag pair
+	RemoveInterfaceTag(tag string, ifIdx uint32) error
+	// SetInterfaceAsDHCPClient sets the provided interface as a DHCP client
+	SetInterfaceAsDHCPClient(ifIdx uint32, hostName string) error
+	// UnsetInterfaceAsDHCPClient un-sets the interface as a DHCP client
+	UnsetInterfaceAsDHCPClient(ifIdx uint32, hostName string) error
+	// AddContainerIP calls IPContainerProxyAddDel VPP API with IsAdd=1
+	AddContainerIP(ifIdx uint32, addr string) error
+	// DelContainerIP calls IPContainerProxyAddDel VPP API with IsAdd=0
+	DelContainerIP(ifIdx uint32, addr string) error
+	// AddInterfaceIP calls SwInterfaceAddDelAddress bin API with IsAdd=1.
+	AddInterfaceIP(ifIdx uint32, addr *net.IPNet) error
+	// DelInterfaceIP calls SwInterfaceAddDelAddress bin API with IsAdd=0.
+	DelInterfaceIP(ifIdx uint32, addr *net.IPNet) error
+	// SetUnnumberedIP sets the interface as un-numbered, linking the IP address of another interface (ifIdxWithIP)
+	SetUnnumberedIP(uIfIdx uint32, ifIdxWithIP uint32) error
+	// UnsetUnnumberedIP un-sets the provided interface as un-numbered; the IP address of the linked interface is removed
+	UnsetUnnumberedIP(uIfIdx uint32) error
+	// SetInterfaceMac calls SwInterfaceSetMacAddress bin API.
+	SetInterfaceMac(ifIdx uint32, macAddress string) error
+	// RegisterMemifSocketFilename registers a new socket file name with the provided ID.
+	RegisterMemifSocketFilename(filename []byte, id uint32) error
+	// SetInterfaceMtu calls HwInterfaceSetMtu bin API with the desired MTU value.
+	SetInterfaceMtu(ifIdx uint32, mtu uint32) error
+	// SetRxMode calls SwInterfaceSetRxMode bin API.
+	SetRxMode(ifIdx uint32, rxModeSettings *interfaces.Interfaces_Interface_RxModeSettings) error
+	// SetRxPlacement configures rx-placement for the interface
+	SetRxPlacement(vppInternalName string, rxPlacement *interfaces.Interfaces_Interface_RxPlacementSettings) error
+	// GetInterfaceVRF retrieves the VRF table assigned to the interface
+	GetInterfaceVRF(ifIdx uint32) (vrfID uint32, err error)
+	// SetInterfaceVRF assigns a VRF table to the interface
+	SetInterfaceVRF(ifaceIndex, vrfID uint32) error
+	// CreateVrfIfNeeded checks if the VRF exists and creates it if not
+	CreateVrfIfNeeded(vrfID uint32) error
+	// DumpMemifSocketDetails dumps memif socket details from the VPP
+	DumpMemifSocketDetails() (map[string]uint32, error)
+	// CheckMsgCompatibilityForInterface checks if interface CRCs are compatible with VPP in runtime.
+	CheckMsgCompatibilityForInterface() error
+}
+
+// BfdVppAPI provides methods for managing BFD
+type BfdVppAPI interface {
+	// AddBfdUDPSession adds a new BFD session with authentication if available.
+	AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, ifIdx uint32, bfdKeyIndexes idxvpp.NameToIdx) error
+	// AddBfdUDPSessionFromDetails adds a new BFD session with authentication if available.
+	AddBfdUDPSessionFromDetails(bfdSess *bfd_api.BfdUDPSessionDetails, bfdKeyIndexes idxvpp.NameToIdx) error
+	// ModifyBfdUDPSession modifies an existing BFD session, excluding authentication, which cannot be changed this way.
+	ModifyBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, swIfIndexes ifaceidx.SwIfIndex) error
+	// DeleteBfdUDPSession removes an existing BFD session.
+	DeleteBfdUDPSession(ifIndex uint32, sourceAddress string, destAddress string) error
+	// DumpBfdUDPSessions returns a list of BFD sessions' metadata
+	DumpBfdUDPSessions() ([]*bfd_api.BfdUDPSessionDetails, error)
+	// DumpBfdUDPSessionsWithID returns a list of BFD sessions' metadata filtered by the provided authentication key
+	DumpBfdUDPSessionsWithID(authKeyIndex uint32) ([]*bfd_api.BfdUDPSessionDetails, error)
+	// SetBfdUDPAuthenticationKey creates a new authentication key.
+	SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key) error
+	// DeleteBfdUDPAuthenticationKey removes the authentication key.
+	DeleteBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key) error
+	// DumpBfdKeys looks up all BFD auth keys and saves their name-to-index mapping
+	DumpBfdKeys() (keys []*bfd_api.BfdAuthKeysDetails, err error)
+	// AddBfdEchoFunction sets up an echo function for the interface.
+	AddBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction, swIfIndexes ifaceidx.SwIfIndex) error
+	// DeleteBfdEchoFunction removes an echo function.
+	DeleteBfdEchoFunction() error
+	// CheckMsgCompatibilityForBfd checks if BFD CRCs are compatible with VPP in runtime.
+	CheckMsgCompatibilityForBfd() error
+}
+
+// NatVppAPI provides methods for managing NAT
+type NatVppAPI interface {
+	// SetNat44Forwarding configures the global forwarding setup for NAT44
+	SetNat44Forwarding(enableFwd bool) error
+	// EnableNat44Interface enables the NAT feature for the provided interface
+	EnableNat44Interface(ifIdx uint32, isInside bool) error
+	// DisableNat44Interface disables the NAT feature for the provided interface
+	DisableNat44Interface(ifIdx uint32, isInside bool) error
+	// EnableNat44InterfaceOutput enables the NAT output feature for the provided interface
+	EnableNat44InterfaceOutput(ifIdx uint32, isInside bool) error
+	// DisableNat44InterfaceOutput disables the NAT output feature for the provided interface
+	DisableNat44InterfaceOutput(ifIdx uint32, isInside bool) error
+	// AddNat44AddressPool adds a new NAT address pool
+	AddNat44AddressPool(first, last []byte, vrf uint32, twiceNat bool) error
+	// DelNat44AddressPool removes an existing NAT address pool
+	DelNat44AddressPool(first, last []byte, vrf uint32, twiceNat bool) error
+	// AddNat44IdentityMapping adds a new NAT44 identity mapping
+	AddNat44IdentityMapping(ctx *IdentityMappingContext) error
+	// DelNat44IdentityMapping removes a NAT44 identity mapping
+	DelNat44IdentityMapping(ctx *IdentityMappingContext) error
+	// AddNat44StaticMapping creates a new static mapping entry (considering address only, or both address and port,
+	// depending on the context)
+	AddNat44StaticMapping(ctx *StaticMappingContext) error
+	// DelNat44StaticMapping removes an existing static mapping entry
+	DelNat44StaticMapping(ctx *StaticMappingContext) error
+	// AddNat44StaticMappingLb creates a new static mapping entry with load balancer
+	AddNat44StaticMappingLb(ctx *StaticMappingLbContext) error
+	// DelNat44StaticMappingLb removes an existing static mapping entry with load balancer
+	DelNat44StaticMappingLb(ctx *StaticMappingLbContext) error
+	// Nat44GlobalConfigDump returns the global config in NB format
+	Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44Global, error)
+	// NAT44DNatDump dumps all types of mappings, sorts them by tag (DNAT label) and creates a set of DNAT configurations
+	NAT44DNatDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44DNat, error)
+	// Nat44InterfaceDump returns a list of interfaces enabled for NAT44
+	Nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex) (interfaces []*nat.Nat44Global_NatInterface, err error)
+	// CheckMsgCompatibilityForNat verifies compatibility of the used binary API calls
+	CheckMsgCompatibilityForNat() error
+}
+
+// StnVppAPI provides methods for managing STN
+type StnVppAPI interface {
+	// AddStnRule calls StnAddDelRule bin API with IsAdd=1
+	AddStnRule(ifIdx uint32, addr *net.IP) error
+	// DelStnRule calls StnAddDelRule bin API with IsAdd=0
+	DelStnRule(ifIdx uint32, addr *net.IP) error
+	// DumpStnRules returns a list of all STN rules configured on the VPP
+	DumpStnRules() (rules []*stn.StnRulesDetails, err error)
+	// CheckMsgCompatibilityForStn verifies compatibility of the used binary API calls
+	CheckMsgCompatibilityForStn() error
+}
+
+// ifVppHandler is an accessor for interface-related vppcalls methods
+type ifVppHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel VPPChannel
+	log          logging.Logger
+}
+
+// bfdVppHandler is an accessor for BFD-related vppcalls methods
+type bfdVppHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel VPPChannel
+	log          logging.Logger
+}
+
+// natVppHandler is an accessor for NAT-related vppcalls methods
+type natVppHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel VPPChannel
+	dumpChannel  VPPChannel
+	log          logging.Logger
+}
+
+// stnVppHandler is an accessor for STN-related vppcalls methods
+type stnVppHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel VPPChannel
+}
+
+// NewIfVppHandler creates a new instance of the interface vppcalls handler
+func NewIfVppHandler(callsChan VPPChannel, log logging.Logger, stopwatch *measure.Stopwatch) *ifVppHandler {
+	return &ifVppHandler{
+		callsChannel: callsChan,
+		stopwatch:    stopwatch,
+		log:          log,
+	}
+}
+
+// NewBfdVppHandler creates a new instance of the BFD vppcalls handler
+func NewBfdVppHandler(callsChan VPPChannel, log logging.Logger, stopwatch *measure.Stopwatch) *bfdVppHandler {
+	return &bfdVppHandler{
+		callsChannel: callsChan,
+		stopwatch:    stopwatch,
+		log:          log,
+	}
+}
+
+// NewNatVppHandler creates a new instance of the NAT vppcalls handler
+func NewNatVppHandler(callsChan, dumpChan VPPChannel, log logging.Logger, stopwatch *measure.Stopwatch) *natVppHandler {
+	return &natVppHandler{
+		callsChannel: callsChan,
+		dumpChannel:  dumpChan,
+		stopwatch:    stopwatch,
+		log:          log,
+	}
+}
+
+// NewStnVppHandler creates a new instance of the STN vppcalls handler
+func NewStnVppHandler(callsChan VPPChannel, stopwatch *measure.Stopwatch) *stnVppHandler {
+	return &stnVppHandler{
+		callsChannel: callsChan,
+		stopwatch:    stopwatch,
+	}
+}
diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go
index b8f704e230..0622d8c647 100644
--- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go
+++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go
@@ -29,11 +29,9 @@ import (
 	"github.com/ligato/vpp-agent/plugins/vpp/model/bfd"
 )
 
-// AddBfdUDPSession adds new BFD session with authentication if available.
-func AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, ifIdx uint32, bfdKeyIndexes idxvpp.NameToIdx, - log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *bfdVppHandler) AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, ifIdx uint32, bfdKeyIndexes idxvpp.NameToIdx) error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdUDPAdd{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdUDPAdd{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bfd_api.BfdUDPAdd{ @@ -67,14 +65,14 @@ func AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, ifIdx uint32, bfdKeyInd // Authentication if bfdSess.Authentication != nil { keyID := string(bfdSess.Authentication.KeyId) - log.Infof("Setting up authentication with index %v", keyID) + handler.log.Infof("Setting up authentication with index %v", keyID) _, _, found := bfdKeyIndexes.LookupIdx(keyID) if found { req.IsAuthenticated = 1 req.BfdKeyID = uint8(bfdSess.Authentication.KeyId) req.ConfKeyID = bfdSess.Authentication.AdvertisedKeyId } else { - log.Infof("Authentication key %v not found", bfdSess.Authentication.KeyId) + handler.log.Infof("Authentication key %v not found", bfdSess.Authentication.KeyId) req.IsAuthenticated = 0 } } else { @@ -82,7 +80,7 @@ func AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, ifIdx uint32, bfdKeyInd } reply := &bfd_api.BfdUDPAddReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -92,11 +90,9 @@ func AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, ifIdx uint32, bfdKeyInd return nil } -// AddBfdUDPSessionFromDetails adds new BFD session with authentication if available. -func AddBfdUDPSessionFromDetails(bfdSess *bfd_api.BfdUDPSessionDetails, bfdKeyIndexes idxvpp.NameToIdx, log logging.Logger, - vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *bfdVppHandler) AddBfdUDPSessionFromDetails(bfdSess *bfd_api.BfdUDPSessionDetails, bfdKeyIndexes idxvpp.NameToIdx) error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdUDPAdd{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdUDPAdd{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bfd_api.BfdUDPAdd{ @@ -112,14 +108,14 @@ func AddBfdUDPSessionFromDetails(bfdSess *bfd_api.BfdUDPSessionDetails, bfdKeyIn // Authentication if bfdSess.IsAuthenticated != 0 { keyID := string(bfdSess.BfdKeyID) - log.Infof("Setting up authentication with index %v", keyID) + handler.log.Infof("Setting up authentication with index %v", keyID) _, _, found := bfdKeyIndexes.LookupIdx(keyID) if found { req.IsAuthenticated = 1 req.BfdKeyID = bfdSess.BfdKeyID req.ConfKeyID = bfdSess.ConfKeyID } else { - log.Infof("Authentication key %v not found", bfdSess.BfdKeyID) + handler.log.Infof("Authentication key %v not found", bfdSess.BfdKeyID) req.IsAuthenticated = 0 } } else { @@ -127,7 +123,7 @@ func AddBfdUDPSessionFromDetails(bfdSess *bfd_api.BfdUDPSessionDetails, bfdKeyIn } reply := &bfd_api.BfdUDPAddReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -137,10 +133,9 @@ func AddBfdUDPSessionFromDetails(bfdSess *bfd_api.BfdUDPSessionDetails, bfdKeyIn return nil } -// ModifyBfdUDPSession modifies existing BFD session excluding authentication which cannot be changed this way. 
-func ModifyBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, swIfIndexes ifaceidx.SwIfIndex, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { +func (handler *bfdVppHandler) ModifyBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, swIfIndexes ifaceidx.SwIfIndex) error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdUDPMod{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdUDPMod{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Find the interface @@ -178,7 +173,7 @@ func ModifyBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, swIfIndexes ifaceidx } reply := &bfd_api.BfdUDPModReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -188,10 +183,9 @@ func ModifyBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, swIfIndexes ifaceidx return nil } -// DeleteBfdUDPSession removes an existing BFD session. -func DeleteBfdUDPSession(ifIndex uint32, sourceAddress string, destAddress string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *bfdVppHandler) DeleteBfdUDPSession(ifIndex uint32, sourceAddress string, destAddress string) error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdUDPDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdUDPDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bfd_api.BfdUDPDel{ @@ -202,7 +196,7 @@ func DeleteBfdUDPSession(ifIndex uint32, sourceAddress string, destAddress strin } reply := &bfd_api.BfdUDPDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -212,23 +206,21 @@ func DeleteBfdUDPSession(ifIndex uint32, sourceAddress string, destAddress strin return nil } -// DumpBfdUDPSessions returns a list of BFD session's metadata -func DumpBfdUDPSessions(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) ([]*bfd_api.BfdUDPSessionDetails, error) { - return dumpBfdUDPSessionsWithID(false, 0, vppChan, stopwatch) +func (handler *bfdVppHandler) DumpBfdUDPSessions() ([]*bfd_api.BfdUDPSessionDetails, error) { + return handler.dumpBfdUDPSessionsWithID(false, 0) } -// DumpBfdUDPSessionsWithID returns a list of BFD session's metadata filtered according to provided authentication key -func DumpBfdUDPSessionsWithID(authKeyIndex uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) ([]*bfd_api.BfdUDPSessionDetails, error) { - return dumpBfdUDPSessionsWithID(true, authKeyIndex, vppChan, stopwatch) +func (handler *bfdVppHandler) DumpBfdUDPSessionsWithID(authKeyIndex uint32) ([]*bfd_api.BfdUDPSessionDetails, error) { + return handler.dumpBfdUDPSessionsWithID(true, authKeyIndex) } -func dumpBfdUDPSessionsWithID(filterID bool, authKeyIndex uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (sessions []*bfd_api.BfdUDPSessionDetails, err error) { +func (handler *bfdVppHandler) dumpBfdUDPSessionsWithID(filterID bool, authKeyIndex uint32) (sessions []*bfd_api.BfdUDPSessionDetails, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdUDPSessionDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdUDPSessionDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bfd_api.BfdUDPSessionDump{} - reqCtx := vppChan.SendMultiRequest(req) + reqCtx := handler.callsChannel.SendMultiRequest(req) for { msg := 
&bfd_api.BfdUDPSessionDetails{} stop, err := reqCtx.ReceiveReply(msg) @@ -255,10 +247,9 @@ func dumpBfdUDPSessionsWithID(filterID bool, authKeyIndex uint32, vppChan govppa return sessions, nil } -// SetBfdUDPAuthenticationKey creates new authentication key. -func SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key, log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { +func (handler *bfdVppHandler) SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key) error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdAuthSetKey{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdAuthSetKey{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Convert authentication according to RFC5880. @@ -268,7 +259,7 @@ func SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key, log logging.Logger } else if bfdKey.AuthenticationType == 1 { authentication = 5 // Meticulous keyed SHA1 } else { - log.Warnf("Provided authentication type not supported, setting up SHA1") + handler.log.Warnf("Provided authentication type not supported, setting up SHA1") authentication = 4 } @@ -280,7 +271,7 @@ func SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key, log logging.Logger } reply := &bfd_api.BfdAuthSetKeyReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -290,10 +281,9 @@ func SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key, log logging.Logger return nil } -// DeleteBfdUDPAuthenticationKey removes the authentication key. -func DeleteBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { +func (handler *bfdVppHandler) DeleteBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key) error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdAuthDelKey{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdAuthDelKey{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bfd_api.BfdAuthDelKey{ @@ -301,7 +291,7 @@ func DeleteBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key, vppChan govppap } reply := &bfd_api.BfdAuthDelKeyReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -311,14 +301,13 @@ func DeleteBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key, vppChan govppap return nil } -// DumpBfdKeys looks up all BFD auth keys and saves their name-to-index mapping -func DumpBfdKeys(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (keys []*bfd_api.BfdAuthKeysDetails, err error) { +func (handler *bfdVppHandler) DumpBfdKeys() (keys []*bfd_api.BfdAuthKeysDetails, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdAuthKeysDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdAuthKeysDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bfd_api.BfdAuthKeysDump{} - reqCtx := vppChan.SendMultiRequest(req) + reqCtx := handler.callsChannel.SendMultiRequest(req) for { msg := &bfd_api.BfdAuthKeysDetails{} stop, err := reqCtx.ReceiveReply(msg) @@ -335,10 +324,9 @@ func DumpBfdKeys(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (keys [ return keys, nil } -// AddBfdEchoFunction sets up an echo function for the interface. 
-func AddBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction, swIfIndexes ifaceidx.SwIfIndex, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { +func (handler *bfdVppHandler) AddBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction, swIfIndexes ifaceidx.SwIfIndex) error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdUDPSetEchoSource{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdUDPSetEchoSource{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Verify the interface presence. @@ -352,7 +340,7 @@ func AddBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction, swIfIndexes ifa } reply := &bfd_api.BfdUDPSetEchoSourceReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -362,17 +350,16 @@ func AddBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction, swIfIndexes ifa return nil } -// DeleteBfdEchoFunction removes an echo function. -func DeleteBfdEchoFunction(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { +func (handler *bfdVppHandler) DeleteBfdEchoFunction() error { defer func(t time.Time) { - stopwatch.TimeLog(bfd_api.BfdUDPDelEchoSource{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bfd_api.BfdUDPDelEchoSource{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare the message. req := &bfd_api.BfdUDPDelEchoSource{} reply := &bfd_api.BfdUDPDelEchoSourceReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go index 380a784983..07db44cac1 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" @@ -30,7 +31,7 @@ import ( ) func TestAddBfdUDPSession(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -38,7 +39,7 @@ func TestAddBfdUDPSession(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "10.0.0.1", DestinationAddress: "20.0.0.1", DesiredMinTxInterval: 10, @@ -48,7 +49,7 @@ func TestAddBfdUDPSession(t *testing.T) { KeyId: 1, AdvertisedKeyId: 1, }, - }, 1, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, bfdKeyIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPAdd) @@ -62,7 +63,7 @@ func TestAddBfdUDPSession(t *testing.T) { } func TestAddBfdUDPSessionIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -70,7 +71,7 @@ func TestAddBfdUDPSessionIPv6(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := 
vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "2001:db8::1", DestinationAddress: "2001:db8:0:1:1:1:1:1", DesiredMinTxInterval: 10, @@ -80,7 +81,7 @@ func TestAddBfdUDPSessionIPv6(t *testing.T) { KeyId: 1, AdvertisedKeyId: 1, }, - }, 1, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, bfdKeyIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPAdd) @@ -94,14 +95,14 @@ func TestAddBfdUDPSessionIPv6(t *testing.T) { } func TestAddBfdUDPSessionAuthKeyNotFound(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "10.0.0.1", DestinationAddress: "20.0.0.1", DesiredMinTxInterval: 10, @@ -111,7 +112,7 @@ func TestAddBfdUDPSessionAuthKeyNotFound(t *testing.T) { KeyId: 1, AdvertisedKeyId: 1, }, - }, 1, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, bfdKeyIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPAdd) @@ -125,17 +126,17 @@ func TestAddBfdUDPSessionAuthKeyNotFound(t *testing.T) { } func TestAddBfdUDPSessionNoAuthKey(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "10.0.0.1", DestinationAddress: "20.0.0.1", DesiredMinTxInterval: 10, RequiredMinRxInterval: 15, DetectMultiplier: 2, - }, 1, nil, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, nil) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPAdd) @@ -149,7 +150,7 @@ func TestAddBfdUDPSessionNoAuthKey(t *testing.T) { } func TestAddBfdUDPSessionIncorrectSrcIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -157,7 +158,7 @@ func TestAddBfdUDPSessionIncorrectSrcIPError(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "incorrect-ip", DestinationAddress: "20.0.0.1", DesiredMinTxInterval: 10, @@ -167,13 +168,13 @@ func TestAddBfdUDPSessionIncorrectSrcIPError(t *testing.T) { KeyId: 1, AdvertisedKeyId: 1, }, - }, 1, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, bfdKeyIndexes) Expect(err).ToNot(BeNil()) } func TestAddBfdUDPSessionIncorrectDstIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -181,7 +182,7 @@ func TestAddBfdUDPSessionIncorrectDstIPError(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "10.0.0.1", DestinationAddress: "incorrect-ip", DesiredMinTxInterval: 10, @@ -191,13 +192,13 @@ func 
TestAddBfdUDPSessionIncorrectDstIPError(t *testing.T) { KeyId: 1, AdvertisedKeyId: 1, }, - }, 1, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, bfdKeyIndexes) Expect(err).ToNot(BeNil()) } func TestAddBfdUDPSessionIPVerError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -205,7 +206,7 @@ func TestAddBfdUDPSessionIPVerError(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "10.0.0.1", DestinationAddress: "2001:db8:0:1:1:1:1:1", DesiredMinTxInterval: 10, @@ -215,47 +216,47 @@ func TestAddBfdUDPSessionIPVerError(t *testing.T) { KeyId: 1, AdvertisedKeyId: 1, }, - }, 1, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, bfdKeyIndexes) Expect(err).ToNot(BeNil()) } func TestAddBfdUDPSessionError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "10.0.0.1", DestinationAddress: "20.0.0.1", DesiredMinTxInterval: 10, RequiredMinRxInterval: 15, DetectMultiplier: 2, - }, 1, nil, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, nil) Expect(err).ToNot(BeNil()) } func TestAddBfdUDPSessionRetvalError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{ Retval: 1, }) - err := vppcalls.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.AddBfdUDPSession(&bfd.SingleHopBFD_Session{ SourceAddress: "10.0.0.1", DestinationAddress: "20.0.0.1", DesiredMinTxInterval: 10, RequiredMinRxInterval: 15, DetectMultiplier: 2, - }, 1, nil, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, 1, nil) Expect(err).ToNot(BeNil()) } func TestAddBfdUDPSessionFromDetails(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -263,7 +264,7 @@ func TestAddBfdUDPSessionFromDetails(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ + err := bfdHandler.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ SwIfIndex: 1, LocalAddr: net.ParseIP("10.0.0.1"), PeerAddr: net.ParseIP("20.0.0.1"), @@ -273,7 +274,7 @@ func TestAddBfdUDPSessionFromDetails(t *testing.T) { RequiredMinRx: 15, DesiredMinTx: 10, DetectMult: 2, - }, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, bfdKeyIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPAdd) @@ -287,14 +288,14 @@ func TestAddBfdUDPSessionFromDetails(t *testing.T) { } func TestAddBfdUDPSessionFromDetailsAuthKeyNotFound(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ + err := 
bfdHandler.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ SwIfIndex: 1, LocalAddr: net.ParseIP("10.0.0.1"), PeerAddr: net.ParseIP("20.0.0.1"), @@ -304,7 +305,7 @@ func TestAddBfdUDPSessionFromDetailsAuthKeyNotFound(t *testing.T) { RequiredMinRx: 15, DesiredMinTx: 10, DetectMult: 2, - }, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, bfdKeyIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPAdd) @@ -313,14 +314,14 @@ func TestAddBfdUDPSessionFromDetailsAuthKeyNotFound(t *testing.T) { } func TestAddBfdUDPSessionFromDetailsNoAuth(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ + err := bfdHandler.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ SwIfIndex: 1, LocalAddr: net.ParseIP("10.0.0.1"), PeerAddr: net.ParseIP("20.0.0.1"), @@ -330,7 +331,7 @@ func TestAddBfdUDPSessionFromDetailsNoAuth(t *testing.T) { RequiredMinRx: 15, DesiredMinTx: 10, DetectMult: 2, - }, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, bfdKeyIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPAdd) @@ -339,25 +340,25 @@ func TestAddBfdUDPSessionFromDetailsNoAuth(t *testing.T) { } func TestAddBfdUDPSessionFromDetailsError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) - err := vppcalls.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ + err := bfdHandler.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ SwIfIndex: 1, LocalAddr: net.ParseIP("10.0.0.1"), PeerAddr: net.ParseIP("20.0.0.1"), IsIpv6: 0, - }, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, bfdKeyIndexes) Expect(err).ToNot(BeNil()) } func TestAddBfdUDPSessionFromDetailsRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -366,18 +367,18 @@ func TestAddBfdUDPSessionFromDetailsRetval(t *testing.T) { Retval: 1, }) - err := vppcalls.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ + err := bfdHandler.AddBfdUDPSessionFromDetails(&bfd_api.BfdUDPSessionDetails{ SwIfIndex: 1, LocalAddr: net.ParseIP("10.0.0.1"), PeerAddr: net.ParseIP("20.0.0.1"), IsIpv6: 0, - }, bfdKeyIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, bfdKeyIndexes) Expect(err).ToNot(BeNil()) } func TestModifyBfdUDPSession(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -385,14 +386,14 @@ func TestModifyBfdUDPSession(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface: "if1", SourceAddress: "10.0.0.1", DestinationAddress: "20.0.0.1", DesiredMinTxInterval: 10, RequiredMinRxInterval: 15, DetectMultiplier: 2, - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).To(BeNil()) vppMsg, ok := 
ctx.MockChannel.Msg.(*bfd_api.BfdUDPMod) @@ -405,7 +406,7 @@ func TestModifyBfdUDPSession(t *testing.T) { } func TestModifyBfdUDPSessionIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -413,11 +414,11 @@ func TestModifyBfdUDPSessionIPv6(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface: "if1", SourceAddress: "2001:db8::1", DestinationAddress: "2001:db8:0:1:1:1:1:1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPMod) @@ -426,7 +427,7 @@ func TestModifyBfdUDPSessionIPv6(t *testing.T) { } func TestModifyBfdUDPSessionDifferentIPVer(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -434,34 +435,34 @@ func TestModifyBfdUDPSessionDifferentIPVer(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface: "if1", SourceAddress: "10.0.0.1", DestinationAddress: "2001:db8:0:1:1:1:1:1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestModifyBfdUDPSessionNoInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface: "if1", SourceAddress: "10.0.0.1", DestinationAddress: "20.0.0.1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestModifyBfdUDPSessionInvalidSrcIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -469,17 +470,17 @@ func TestModifyBfdUDPSessionInvalidSrcIP(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface: "if1", SourceAddress: "invalid-ip", DestinationAddress: "20.0.0.1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestModifyBfdUDPSessionInvalidDstIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -487,17 +488,17 @@ func TestModifyBfdUDPSessionInvalidDstIP(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface: "if1", SourceAddress: "10.0.0.1", DestinationAddress: "invalid-ip", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func 
TestModifyBfdUDPSessionError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -505,17 +506,17 @@ func TestModifyBfdUDPSessionError(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface:          "if1", SourceAddress:      "10.0.0.1", DestinationAddress: "20.0.0.1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestModifyBfdUDPSessionRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -525,22 +526,22 @@ func TestModifyBfdUDPSessionRetval(t *testing.T) { Retval: 1, }) - err := vppcalls.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ + err := bfdHandler.ModifyBfdUDPSession(&bfd.SingleHopBFD_Session{ Interface:          "if1", SourceAddress:      "10.0.0.1", DestinationAddress: "20.0.0.1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestDeleteBfdUDPSession(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelReply{}) - err := vppcalls.DeleteBfdUDPSession(1, "10.0.0.1", "20.0.0.1", ctx.MockChannel, nil) + err := bfdHandler.DeleteBfdUDPSession(1, "10.0.0.1", "20.0.0.1") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPDel) @@ -552,31 +553,31 @@ func TestDeleteBfdUDPSession(t *testing.T) { } func TestDeleteBfdUDPSessionError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) - err := vppcalls.DeleteBfdUDPSession(1, "10.0.0.1", "20.0.0.1", ctx.MockChannel, nil) + err := bfdHandler.DeleteBfdUDPSession(1, "10.0.0.1", "20.0.0.1") Expect(err).ToNot(BeNil()) } func TestDeleteBfdUDPSessionRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelReply{ Retval: 1, }) - err := vppcalls.DeleteBfdUDPSession(1, "10.0.0.1", "20.0.0.1", ctx.MockChannel, nil) + err := bfdHandler.DeleteBfdUDPSession(1, "10.0.0.1", "20.0.0.1") Expect(err).ToNot(BeNil()) } func TestDumpBfdUDPSessions(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPSessionDetails{ @@ -586,14 +587,14 @@ func TestDumpBfdUDPSessions(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - sessions, err := vppcalls.DumpBfdUDPSessions(ctx.MockChannel, nil) + sessions, err := bfdHandler.DumpBfdUDPSessions() Expect(err).To(BeNil()) Expect(sessions).To(HaveLen(1)) } func TestDumpBfdUDPSessionsWithID(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() // Authenticated with ID 1 @@ -622,25 +623,25 @@ func TestDumpBfdUDPSessionsWithID(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - sessions, err := vppcalls.DumpBfdUDPSessionsWithID(1, ctx.MockChannel, nil) + sessions, err := bfdHandler.DumpBfdUDPSessionsWithID(1) Expect(err).To(BeNil()) Expect(sessions).To(HaveLen(1)) } func 
TestSetBfdUDPAuthenticationKeySha1(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) - err := vppcalls.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, AuthenticationType: bfd.SingleHopBFD_Key_KEYED_SHA1, Secret: "secret", - }, logrus.DefaultLogger(), ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdAuthSetKey) @@ -652,18 +653,18 @@ func TestSetBfdUDPAuthenticationKeySha1(t *testing.T) { } func TestSetBfdUDPAuthenticationKeyMeticulous(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) - err := vppcalls.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, AuthenticationType: bfd.SingleHopBFD_Key_METICULOUS_KEYED_SHA1, Secret: "secret", - }, logrus.DefaultLogger(), ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdAuthSetKey) @@ -675,18 +676,18 @@ func TestSetBfdUDPAuthenticationKeyMeticulous(t *testing.T) { } func TestSetBfdUDPAuthenticationKeyUnknown(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) - err := vppcalls.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, AuthenticationType: 2, // Unknown type Secret: "secret", - }, logrus.DefaultLogger(), ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdAuthSetKey) @@ -698,54 +699,54 @@ func TestSetBfdUDPAuthenticationKeyUnknown(t *testing.T) { } func TestSetBfdUDPAuthenticationError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthDelKeyReply{}) - err := vppcalls.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, AuthenticationType: bfd.SingleHopBFD_Key_KEYED_SHA1, Secret: "secret", - }, logrus.DefaultLogger(), ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestSetBfdUDPAuthenticationRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{ Retval: 1, }) - err := vppcalls.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.SetBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, AuthenticationType: bfd.SingleHopBFD_Key_KEYED_SHA1, Secret: "secret", - }, logrus.DefaultLogger(), ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDeleteBfdUDPAuthenticationKey(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthDelKeyReply{}) - err := vppcalls.DeleteBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.DeleteBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, 
AuthenticationType: bfd.SingleHopBFD_Key_KEYED_SHA1, Secret: "secret", - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdAuthDelKey) @@ -754,43 +755,43 @@ func TestDeleteBfdUDPAuthenticationKey(t *testing.T) { } func TestDeleteBfdUDPAuthenticationKeyError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) - err := vppcalls.DeleteBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.DeleteBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, AuthenticationType: bfd.SingleHopBFD_Key_KEYED_SHA1, Secret: "secret", - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDeleteBfdUDPAuthenticationKeyRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthDelKeyReply{ Retval: 1, }) - err := vppcalls.DeleteBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ + err := bfdHandler.DeleteBfdUDPAuthenticationKey(&bfd.SingleHopBFD_Key{ Name: "bfd-key", AuthKeyIndex: 1, Id: 1, AuthenticationType: bfd.SingleHopBFD_Key_KEYED_SHA1, Secret: "secret", - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDumpBfdKeys(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthKeysDetails{ @@ -805,14 +806,14 @@ func TestDumpBfdKeys(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - keys, err := vppcalls.DumpBfdKeys(ctx.MockChannel, nil) + keys, err := bfdHandler.DumpBfdKeys() Expect(err).To(BeNil()) Expect(keys).To(HaveLen(2)) } func TestAddBfdEchoFunction(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -820,10 +821,10 @@ func TestAddBfdEchoFunction(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPSetEchoSourceReply{}) - err := vppcalls.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ + err := bfdHandler.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ Name: "echo", EchoSourceInterface: "if1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPSetEchoSource) Expect(ok).To(BeTrue()) @@ -831,22 +832,22 @@ func TestAddBfdEchoFunction(t *testing.T) { } func TestAddBfdEchoFunctionInterfaceNotFound(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) ctx.MockVpp.MockReply(&bfd_api.BfdUDPSetEchoSourceReply{}) - err := vppcalls.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ + err := bfdHandler.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ Name: "echo", EchoSourceInterface: "if1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestAddBfdEchoFunctionError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -854,15 +855,15 @@ func TestAddBfdEchoFunctionError(t *testing.T) { ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelEchoSourceReply{}) - err := 
vppcalls.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ + err := bfdHandler.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ Name: "echo", EchoSourceInterface: "if1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestAddBfdEchoFunctionRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -872,43 +873,50 @@ func TestAddBfdEchoFunctionRetval(t *testing.T) { Retval: 1, }) - err := vppcalls.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ + err := bfdHandler.AddBfdEchoFunction(&bfd.SingleHopBFD_EchoFunction{ Name: "echo", EchoSourceInterface: "if1", - }, ifIndexes, ctx.MockChannel, nil) + }, ifIndexes) Expect(err).ToNot(BeNil()) } func TestDeleteBfdEchoFunction(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelEchoSourceReply{}) - err := vppcalls.DeleteBfdEchoFunction(ctx.MockChannel, nil) + err := bfdHandler.DeleteBfdEchoFunction() Expect(err).To(BeNil()) _, ok := ctx.MockChannel.Msg.(*bfd_api.BfdUDPDelEchoSource) Expect(ok).To(BeTrue()) } func TestDeleteBfdEchoFunctionError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPSetEchoSourceReply{}) - err := vppcalls.DeleteBfdEchoFunction(ctx.MockChannel, nil) + err := bfdHandler.DeleteBfdEchoFunction() Expect(err).ToNot(BeNil()) } func TestDeleteBfdEchoFunctionRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bfdHandler := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelEchoSourceReply{ Retval: 1, }) - err := vppcalls.DeleteBfdEchoFunction(ctx.MockChannel, nil) + err := bfdHandler.DeleteBfdEchoFunction() Expect(err).ToNot(BeNil()) } + +func bfdTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.BfdVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + bfdHandler := vppcalls.NewBfdVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + return ctx, bfdHandler +} diff --git a/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go index 6019375dd1..147bbd6cce 100644 --- a/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go @@ -16,7 +16,6 @@ package vppcalls import ( govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" @@ -29,8 +28,7 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/vxlan" ) -// CheckMsgCompatibilityForInterface checks if interface CRSs are compatible with VPP in runtime. -func CheckMsgCompatibilityForInterface(log logging.Logger, vppChan govppapi.Channel) error { +func (handler *ifVppHandler) CheckMsgCompatibilityForInterface() error { msgs := []govppapi.Message{ &memif.MemifCreate{}, &memif.MemifCreateReply{}, @@ -97,15 +95,10 @@ func CheckMsgCompatibilityForInterface(log logging.Logger, vppChan govppapi.Chan &ip.IPContainerProxyAddDel{}, &ip.IPContainerProxyAddDelReply{}, } - err := vppChan.CheckMessageCompatibility(msgs...) 
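CheckMsgCompatibilityForInterface verifies the whole interface message set against the running VPP before any configuration is attempted, and after this change it hands the result of CheckMessageCompatibility straight back to the caller instead of logging it. A hedged sketch of how a plugin's Init might consume the per-handler checks (ifHandler and bfdHandler are hypothetical variables; the real call sites are outside this diff):

    // Hypothetical init-time usage of the refactored compatibility checks.
    if err := ifHandler.CheckMsgCompatibilityForInterface(); err != nil {
    	return fmt.Errorf("interface messages incompatible with running VPP: %v", err)
    }
    if err := bfdHandler.CheckMsgCompatibilityForBfd(); err != nil {
    	return fmt.Errorf("BFD messages incompatible with running VPP: %v", err)
    }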
- if err != nil { - log.Error(err) - } - return err + return handler.callsChannel.CheckMessageCompatibility(msgs...) } -// CheckMsgCompatibilityForBfd checks if bfd CRSs are compatible with VPP in runtime. -func CheckMsgCompatibilityForBfd(vppChan govppapi.Channel) error { +func (handler *bfdVppHandler) CheckMsgCompatibilityForBfd() error { msgs := []govppapi.Message{ &bfd.BfdUDPAdd{}, &bfd.BfdUDPAddReply{}, @@ -118,11 +111,10 @@ func CheckMsgCompatibilityForBfd(vppChan govppapi.Channel) error { &bfd.BfdAuthDelKey{}, &bfd.BfdAuthDelKeyReply{}, } - return vppChan.CheckMessageCompatibility(msgs...) + return handler.callsChannel.CheckMessageCompatibility(msgs...) } -// CheckMsgCompatibilityForNat verifies compatibility of used binary API calls -func CheckMsgCompatibilityForNat(vppChan govppapi.Channel) error { +func (handler *natVppHandler) CheckMsgCompatibilityForNat() error { msgs := []govppapi.Message{ &nat.Nat44AddDelAddressRange{}, &nat.Nat44AddDelAddressRangeReply{}, @@ -135,14 +127,13 @@ func CheckMsgCompatibilityForNat(vppChan govppapi.Channel) error { &nat.Nat44AddDelLbStaticMapping{}, &nat.Nat44AddDelLbStaticMappingReply{}, } - return vppChan.CheckMessageCompatibility(msgs...) + return handler.callsChannel.CheckMessageCompatibility(msgs...) } -// CheckMsgCompatibilityForStn verifies compatibility of used binary API calls -func CheckMsgCompatibilityForStn(vppChan govppapi.Channel) error { +func (handler *stnVppHandler) CheckMsgCompatibilityForStn() error { msgs := []govppapi.Message{ &stn.StnAddDelRule{}, &stn.StnAddDelRuleReply{}, } - return vppChan.CheckMessageCompatibility(msgs...) + return handler.callsChannel.CheckMessageCompatibility(msgs...) } diff --git a/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls.go index 2b796fdb22..f8a1502163 100644 --- a/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls.go @@ -18,14 +18,12 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/dhcp" ) -func handleInterfaceDHCP(ifIdx uint32, hostName string, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) handleInterfaceDHCP(ifIdx uint32, hostName string, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(dhcp.DhcpClientConfig{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(dhcp.DhcpClientConfig{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &dhcp.DhcpClientConfig{ @@ -40,7 +38,7 @@ func handleInterfaceDHCP(ifIdx uint32, hostName string, isAdd bool, vppChan govp } reply := &dhcp.DhcpClientConfigReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -50,12 +48,10 @@ func handleInterfaceDHCP(ifIdx uint32, hostName string, isAdd bool, vppChan govp return nil } -// SetInterfaceAsDHCPClient sets provided interface as a DHCP client -func SetInterfaceAsDHCPClient(ifIdx uint32, hostName string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { - return handleInterfaceDHCP(ifIdx, hostName, true, vppChan, stopwatch) +func (handler *ifVppHandler) SetInterfaceAsDHCPClient(ifIdx uint32, hostName string) error { + return handler.handleInterfaceDHCP(ifIdx, hostName, true) } -// UnsetInterfaceAsDHCPClient un-sets interface as DHCP client -func UnsetInterfaceAsDHCPClient(ifIdx 
uint32, hostName string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { - return handleInterfaceDHCP(ifIdx, hostName, false, vppChan, stopwatch) +func (handler *ifVppHandler) UnsetInterfaceAsDHCPClient(ifIdx uint32, hostName string) error { + return handler.handleInterfaceDHCP(ifIdx, hostName, false) } diff --git a/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls_test.go index bca0cf24de..88a7b2dba9 100644 --- a/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/dhcp_vppcalls_test.go @@ -18,18 +18,16 @@ import ( "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/dhcp" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestSetInterfaceAsDHCPClient(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&dhcp.DhcpClientConfigReply{}) - err := vppcalls.SetInterfaceAsDHCPClient(1, "hostName", ctx.MockChannel, nil) + err := ifHandler.SetInterfaceAsDHCPClient(1, "hostName") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*dhcp.DhcpClientConfig) @@ -41,36 +39,36 @@ func TestSetInterfaceAsDHCPClient(t *testing.T) { } func TestSetInterfaceAsDHCPClientError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&dhcp.DhcpComplEvent{}) - err := vppcalls.SetInterfaceAsDHCPClient(1, "hostName", ctx.MockChannel, nil) + err := ifHandler.SetInterfaceAsDHCPClient(1, "hostName") Expect(err).ToNot(BeNil()) } func TestSetInterfaceAsDHCPClientRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&dhcp.DhcpClientConfigReply{ Retval: 1, }) - err := vppcalls.SetInterfaceAsDHCPClient(1, "hostName", ctx.MockChannel, nil) + err := ifHandler.SetInterfaceAsDHCPClient(1, "hostName") Expect(err).ToNot(BeNil()) } func TestUnsetInterfaceAsDHCPClient(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&dhcp.DhcpClientConfigReply{}) - err := vppcalls.UnsetInterfaceAsDHCPClient(1, "hostName", ctx.MockChannel, nil) + err := ifHandler.UnsetInterfaceAsDHCPClient(1, "hostName") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*dhcp.DhcpClientConfig) @@ -82,25 +80,25 @@ func TestUnsetInterfaceAsDHCPClient(t *testing.T) { } func TestUnsetInterfaceAsDHCPClientError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&dhcp.DhcpComplEvent{}) - err := vppcalls.UnsetInterfaceAsDHCPClient(1, "hostName", ctx.MockChannel, nil) + err := ifHandler.UnsetInterfaceAsDHCPClient(1, "hostName") Expect(err).ToNot(BeNil()) } func TestUnsetInterfaceAsDHCPClientRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&dhcp.DhcpClientConfigReply{ Retval: 1, }) - err := vppcalls.UnsetInterfaceAsDHCPClient(1, "hostName", ctx.MockChannel, nil) + err := ifHandler.UnsetInterfaceAsDHCPClient(1, "hostName") Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/doc.go b/plugins/vpp/ifplugin/vppcalls/doc.go index b6a8c38966..c2b6f93842 100644 --- a/plugins/vpp/ifplugin/vppcalls/doc.go +++ 
b/plugins/vpp/ifplugin/vppcalls/doc.go @@ -1,3 +1,3 @@ // Package vppcalls contains wrappers over VPP binary APIs for all supported -// interface types. +// interface types and for dumping all interfaces configured in VPP. package vppcalls diff --git a/plugins/vpp/ifplugin/vppdump/dump_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go similarity index 72% rename from plugins/vpp/ifplugin/vppdump/dump_vppcalls.go rename to plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go index 541d559706..58b819779e 100644 --- a/plugins/vpp/ifplugin/vppdump/dump_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls import ( "bytes" @@ -23,7 +23,6 @@ import ( "git.fd.io/govpp.git/api" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" @@ -31,7 +30,6 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/tap" "github.com/ligato/vpp-agent/plugins/vpp/binapi/tapv2" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vxlan" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ifnb "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) @@ -44,20 +42,13 @@ type Interface struct { ifnb.Interfaces_Interface } -// DumpInterfaces dumps VPP interface data into the northbound API data structure -// map indexed by software interface index. -// -// LIMITATIONS: -// - there is no af_packet dump binary API. We relay on naming conventions of the internal VPP interface names -// - ip.IPAddressDetails has wrong internal structure, as a workaround we need to handle them as notifications -// -func DumpInterfaces(log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (map[uint32]*Interface, error) { +func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*Interface, error) { start := time.Now() // map for the resulting interfaces ifs := make(map[uint32]*Interface) // First, dump all interfaces to create initial data. - reqCtx := vppChan.SendMultiRequest(&interfaces.SwInterfaceDump{}) + reqCtx := handler.callsChannel.SendMultiRequest(&interfaces.SwInterfaceDump{}) for { ifDetails := &interfaces.SwInterfaceDetails{} @@ -66,8 +57,7 @@ func DumpInterfaces(log logging.Logger, vppChan govppapi.Channel, stopwatch *mea break // Break from the loop. 
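The DumpInterfaces method assembled in this hunk returns the northbound interface data keyed by software interface index, with the VPP-internal name carried alongside in the Interface wrapper declared above. A short consumer sketch (the caller is illustrative; the fields are taken from the wrapper type and the code below):

    // ifs maps sw_if_index -> *Interface (ifnb data plus VPPInternalName).
    ifs, err := ifHandler.DumpInterfaces()
    if err != nil {
    	return err
    }
    for swIfIdx, iface := range ifs {
    	fmt.Printf("sw_if_index %d: internal name %q, VRF %d\n",
    		swIfIdx, iface.VPPInternalName, iface.Vrf)
    }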
} if err != nil { - log.Error(err) - return nil, err + return nil, fmt.Errorf("failed to dump interface: %v", err) } iface := &Interface{ @@ -93,47 +83,50 @@ func DumpInterfaces(log logging.Logger, vppChan govppapi.Channel, stopwatch *mea ifs[ifDetails.SwIfIndex] = iface if iface.Type == ifnb.InterfaceType_AF_PACKET_INTERFACE { - fillAFPacketDetails(ifs, ifDetails.SwIfIndex, iface.VPPInternalName) + err := handler.dumpAFPacketDetails(ifs, ifDetails.SwIfIndex, iface.VPPInternalName) + if err != nil { + return nil, err + } } } handler.log.Debugf("dumped %d interfaces", len(ifs)) // SwInterfaceDump time - timeLog := measure.GetTimeLog(interfaces.SwInterfaceDump{}, stopwatch) + timeLog := measure.GetTimeLog(interfaces.SwInterfaceDump{}, handler.stopwatch) if timeLog != nil { timeLog.LogTimeEntry(time.Since(start)) } for idx := range ifs { - vrfID, err := vppcalls.GetInterfaceVRF(idx, log, vppChan) + vrfID, err := handler.GetInterfaceVRF(idx) if err != nil { return nil, err } ifs[idx].Vrf = vrfID } - timeLog = measure.GetTimeLog(ip.IPAddressDump{}, stopwatch) - err := dumpIPAddressDetails(log, vppChan, ifs, 0, timeLog) + timeLog = measure.GetTimeLog(ip.IPAddressDump{}, handler.stopwatch) + err := handler.dumpIPAddressDetails(ifs, 0, timeLog) if err != nil { return nil, err } - err = dumpIPAddressDetails(log, vppChan, ifs, 1, timeLog) + err = handler.dumpIPAddressDetails(ifs, 1, timeLog) if err != nil { return nil, err } - err = dumpMemifDetails(log, vppChan, ifs, measure.GetTimeLog(memif.MemifDump{}, stopwatch)) + err = handler.dumpMemifDetails(ifs) if err != nil { return nil, err } - err = dumpTapDetails(log, vppChan, ifs, measure.GetTimeLog(tap.SwInterfaceTapDump{}, stopwatch)) + err = handler.dumpTapDetails(ifs) if err != nil { return nil, err } - err = dumpVxlanDetails(log, vppChan, ifs, measure.GetTimeLog(vxlan.VxlanTunnelDump{}, stopwatch)) + err = handler.dumpVxlanDetails(ifs) if err != nil { return nil, err } @@ -141,19 +134,15 @@ func DumpInterfaces(log logging.Logger, vppChan govppapi.Channel, stopwatch *mea return ifs, nil } -// DumpMemifSocketDetails dumps memif socket details from the VPP -func DumpMemifSocketDetails(log logging.Logger, vppChan govppapi.Channel, timeLog measure.StopWatchEntry) (map[string]uint32, error) { +func (handler *ifVppHandler) DumpMemifSocketDetails() (map[string]uint32, error) { // MemifSocketFilenameDump time measurement - start := time.Now() - defer func() { - if timeLog != nil { - timeLog.LogTimeEntry(time.Since(start)) - } - }() + defer func(t time.Time) { + handler.stopwatch.TimeLog(memif.MemifSocketFilenameDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) memifSocketMap := make(map[string]uint32) - reqCtx := vppChan.SendMultiRequest(&memif.MemifSocketFilenameDump{}) + reqCtx := handler.callsChannel.SendMultiRequest(&memif.MemifSocketFilenameDump{}) for { socketDetails := &memif.MemifSocketFilenameDetails{} stop, err := reqCtx.ReceiveReply(socketDetails) @@ -161,31 +150,30 @@ 
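DumpMemifSocketDetails builds the socket-filename-to-ID map that dumpMemifDetails later inverts when reconstructing each memif configuration. A sketch of such a reverse lookup as a hypothetical helper (lookupMemifFilename does not exist in the package; socketID stands for the SocketID reported in a MemifDetails reply):

    // lookupMemifFilename inverts the dumped filename->ID map to find the
    // socket filename used by one memif interface.
    func (handler *ifVppHandler) lookupMemifFilename(socketID uint32) (string, error) {
    	memifSocketMap, err := handler.DumpMemifSocketDetails()
    	if err != nil {
    		return "", err
    	}
    	for filename, id := range memifSocketMap {
    		if id == socketID {
    			return filename, nil
    		}
    	}
    	return "", fmt.Errorf("no socket filename found for socket ID %d", socketID)
    }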
} if err != nil { - log.Error(err) - return memifSocketMap, err + return memifSocketMap, fmt.Errorf("failed to dump memif socket filename details: %v", err) } filename := string(bytes.SplitN(socketDetails.SocketFilename, []byte{0x00}, 2)[0]) memifSocketMap[filename] = socketDetails.SocketID } - log.Debugf("Memif socket dump completed, found %d entries", len(memifSocketMap)) + handler.log.Debugf("Memif socket dump completed, found %d entries", len(memifSocketMap)) return memifSocketMap, nil } // dumpIPAddressDetails dumps IP address details of interfaces from VPP and fills them into the provided interface map. -func dumpIPAddressDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32]*Interface, isIPv6 uint8, timeLog measure.StopWatchEntry) error { +func (handler *ifVppHandler) dumpIPAddressDetails(ifs map[uint32]*Interface, isIPv6 uint8, timeLog measure.StopWatchEntry) error { // TODO: workaround for incorrect ip.IPAddressDetails message notifChan := make(chan api.Message, 100) - subs, _ := vppChan.SubscribeNotification(notifChan, ip.NewIPAddressDetails) + subs, _ := handler.callsChannel.SubscribeNotification(notifChan, ip.NewIPAddressDetails) // Dump IP addresses of each interface. for idx := range ifs { // IPAddressDetails time measurement start := time.Now() - reqCtx := vppChan.SendMultiRequest(&ip.IPAddressDump{SwIfIndex: idx, IsIpv6: isIPv6}) + reqCtx := handler.callsChannel.SendMultiRequest(&ip.IPAddressDump{SwIfIndex: idx, IsIpv6: isIPv6}) for { ipDetails := &ip.IPAddressDetails{} stop, err := reqCtx.ReceiveReply(ipDetails) @@ -193,16 +181,15 @@ func dumpIPAddressDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[ break // Break from the loop. } if err != nil { - log.Error(err) - return err + return fmt.Errorf("failed to dump interface %d IP address details: %v", idx, err) } - processIPDetails(ifs, ipDetails) + handler.processIPDetails(ifs, ipDetails) } // TODO: workaround for incorrect ip.IPAddressDetails message for len(notifChan) > 0 { notifMsg := <-notifChan - processIPDetails(ifs, notifMsg.(*ip.IPAddressDetails)) + handler.processIPDetails(ifs, notifMsg.(*ip.IPAddressDetails)) } // IPAddressDump time @@ -212,13 +199,13 @@ func dumpIPAddressDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[ } // TODO: workaround for incorrect ip.IPAddressDetails message - vppChan.UnsubscribeNotification(subs) + handler.callsChannel.UnsubscribeNotification(subs) return nil } // processIPDetails processes ip.IPAddressDetails binary API message and fills the details into the provided interface map. -func processIPDetails(ifs map[uint32]*Interface, ipDetails *ip.IPAddressDetails) { +func (handler *ifVppHandler) processIPDetails(ifs map[uint32]*Interface, ipDetails *ip.IPAddressDetails) { _, ifIdxExists := ifs[ipDetails.SwIfIndex] if !ifIdxExists { return @@ -235,8 +222,8 @@ func processIPDetails(ifs map[uint32]*Interface, ipDetails *ip.IPAddressDetails) ifs[ipDetails.SwIfIndex].IpAddresses = append(ifs[ipDetails.SwIfIndex].IpAddresses, ipAddr) } -// fillAFPacketDetails fills af_packet interface details into the provided interface map. -func fillAFPacketDetails(ifs map[uint32]*Interface, swIfIndex uint32, ifName string) { +// dumpAFPacketDetails fills af_packet interface details into the provided interface map. 
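// VPP exposes no af_packet dump binary API, so the host interface name is
// recovered from the internal "host-<ifname>" naming convention; for example,
// VPPInternalName "host-eth0" yields Afpacket.HostIfName "eth0".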
+func (handler *ifVppHandler) dumpAFPacketDetails(ifs map[uint32]*Interface, swIfIndex uint32, ifName string) error { ifs[swIfIndex].Afpacket = &ifnb.Interfaces_Interface_Afpacket{ HostIfName: strings.TrimPrefix(ifName, "host-"), } @@ -244,22 +231,19 @@ func fillAFPacketDetails(ifs map[uint32]*Interface, swIfIndex uint32, ifName str } // dumpMemifDetails dumps memif interface details from VPP and fills them into the provided interface map. -func dumpMemifDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32]*Interface, timeLog measure.StopWatchEntry) error { +func (handler *ifVppHandler) dumpMemifDetails(ifs map[uint32]*Interface) error { // MemifDetails time measurement - start := time.Now() - defer func() { - if timeLog != nil { - timeLog.LogTimeEntry(time.Since(start)) - } - }() + defer func(t time.Time) { + handler.stopwatch.TimeLog(memif.MemifDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) // Dump all memif sockets - memifSocketMap, err := DumpMemifSocketDetails(log, vppChan, timeLog) + memifSocketMap, err := handler.DumpMemifSocketDetails() if err != nil { return err } - reqCtx := vppChan.SendMultiRequest(&memif.MemifDump{}) + reqCtx := handler.callsChannel.SendMultiRequest(&memif.MemifDump{}) for { memifDetails := &memif.MemifDetails{} stop, err := reqCtx.ReceiveReply(memifDetails) @@ -267,8 +251,7 @@ func dumpMemifDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint break // Break from the loop. } if err != nil { - log.Error(err) - return err + return fmt.Errorf("failed to dump memif interface: %v", err) } _, ifIdxExists := ifs[memifDetails.SwIfIndex] if !ifIdxExists { @@ -286,7 +269,7 @@ func dumpMemifDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint } } // Socket for configured memif should exist - log.Warnf("Socket ID not found for memif %v", memifDetails.SwIfIndex) + handler.log.Warnf("Socket ID not found for memif %v", memifDetails.SwIfIndex) return }(memifSocketMap), RingSize: memifDetails.RingSize, @@ -300,17 +283,14 @@ func dumpMemifDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint } // dumpTapDetails dumps tap interface details from VPP and fills them into the provided interface map. -func dumpTapDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32]*Interface, timeLog measure.StopWatchEntry) error { +func (handler *ifVppHandler) dumpTapDetails(ifs map[uint32]*Interface) error { // SwInterfaceTapDump time measurement - start := time.Now() - defer func() { - if timeLog != nil { - timeLog.LogTimeEntry(time.Since(start)) - } - }() + defer func(t time.Time) { + handler.stopwatch.TimeLog(tap.SwInterfaceTapDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) // Original TAP. - reqCtx := vppChan.SendMultiRequest(&tap.SwInterfaceTapDump{}) + reqCtx := handler.callsChannel.SendMultiRequest(&tap.SwInterfaceTapDump{}) for { tapDetails := &tap.SwInterfaceTapDetails{} stop, err := reqCtx.ReceiveReply(tapDetails) @@ -318,8 +298,7 @@ func dumpTapDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32 break // Break from the loop. 
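// ReceiveReply drives the multi-request stream: stop turns true once the
// terminating control-ping reply arrives, err reports a failed reply, and
// details whose SwIfIndex is missing from ifs are skipped further below.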
} if err != nil { - log.Error(err) - return err + return fmt.Errorf("failed to dump TAP interface details: %v", err) } _, ifIdxExists := ifs[tapDetails.SwIfIndex] if !ifIdxExists { @@ -333,7 +312,7 @@ func dumpTapDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32 } // TAP v.2 - reqCtx = vppChan.SendMultiRequest(&tapv2.SwInterfaceTapV2Dump{}) + reqCtx = handler.callsChannel.SendMultiRequest(&tapv2.SwInterfaceTapV2Dump{}) for { tapDetails := &tapv2.SwInterfaceTapV2Details{} stop, err := reqCtx.ReceiveReply(tapDetails) @@ -341,8 +320,7 @@ func dumpTapDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32 break // Break from the loop. } if err != nil { - log.Error(err) - return err + return fmt.Errorf("failed to dump TAPv2 interface details: %v", err) } _, ifIdxExists := ifs[tapDetails.SwIfIndex] if !ifIdxExists { @@ -361,16 +339,13 @@ func dumpTapDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32 } // dumpVxlanDetails dumps VXLAN interface details from VPP and fills them into the provided interface map. -func dumpVxlanDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint32]*Interface, timeLog measure.StopWatchEntry) error { +func (handler *ifVppHandler) dumpVxlanDetails(ifs map[uint32]*Interface) error { // VxlanTunnelDump time measurement - start := time.Now() - defer func() { - if timeLog != nil { - timeLog.LogTimeEntry(time.Since(start)) - } - }() + defer func(t time.Time) { + handler.stopwatch.TimeLog(vxlan.VxlanTunnelDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) - reqCtx := vppChan.SendMultiRequest(&vxlan.VxlanTunnelDump{SwIfIndex: ^uint32(0)}) + reqCtx := handler.callsChannel.SendMultiRequest(&vxlan.VxlanTunnelDump{SwIfIndex: ^uint32(0)}) for { vxlanDetails := &vxlan.VxlanTunnelDetails{} stop, err := reqCtx.ReceiveReply(vxlanDetails) @@ -378,8 +353,7 @@ func dumpVxlanDetails(log logging.Logger, vppChan govppapi.Channel, ifs map[uint break // Break from the loop. } if err != nil { - log.Error(err) - return err + return fmt.Errorf("failed to dump VxLAN tunnel interface details: %v", err) } _, ifIdxExists := ifs[vxlanDetails.SwIfIndex] if !ifIdxExists { diff --git a/plugins/vpp/ifplugin/vppdump/dump_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls_test.go similarity index 90% rename from plugins/vpp/ifplugin/vppdump/dump_vppcalls_test.go rename to plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls_test.go index 13075d7d3b..08a299b094 100644 --- a/plugins/vpp/ifplugin/vppdump/dump_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls_test import ( "testing" @@ -22,7 +22,6 @@ import ( "git.fd.io/govpp.git/adapter/mock" govppapi "git.fd.io/govpp.git/api" "git.fd.io/govpp.git/core/bin_api/vpe" - "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" "github.com/ligato/vpp-agent/plugins/vpp/binapi/memif" @@ -30,7 +29,6 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/tapv2" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vxlan" interfaces2 "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) @@ -81,17 +79,17 @@ func vppMockHandler(vppMock *mock.VppAdapter, dataList []*vppReplyMock) mock.Rep // Test dump of interfaces without any replies, should return error and nil // interfaces func TestDumpInterfacesFullySilent(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump if interfaces without replying to all requests func TestDumpInterfacesSilentSwInterfaceGetTableReply(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -102,14 +100,14 @@ func TestDumpInterfacesSilentSwInterfaceGetTableReply(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump if interfaces without replying to all requests func TestDumpInterfacesSilentIpAddressDetails(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -125,14 +123,14 @@ func TestDumpInterfacesSilentIpAddressDetails(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump if interfaces without replying to all requests func TestDumpInterfacesSilentMemifSocketFilenameDetails(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -153,14 +151,14 @@ func TestDumpInterfacesSilentMemifSocketFilenameDetails(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump if interfaces without replying to all requests func TestDumpInterfacesSilentMemifDetails(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -186,14 +184,14 @@ func TestDumpInterfacesSilentMemifDetails(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump if interfaces without replying to all requests func TestDumpInterfacesSilentSwInterfaceTapDetails(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -224,14 +222,14 @@ func TestDumpInterfacesSilentSwInterfaceTapDetails(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump if interfaces without replying to all requests func TestDumpInterfacesSilentSwInterfaceTapV2Details(t *testing.T) { - ctx := 
vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -267,14 +265,14 @@ func TestDumpInterfacesSilentSwInterfaceTapV2Details(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump of interfaces without replying to all requests func TestDumpInterfacesSilentVxlanTunnelDetails(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -315,14 +313,14 @@ func TestDumpInterfacesSilentVxlanTunnelDetails(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(Not(BeNil())) Expect(intfs).To(BeNil()) } // Test dump of interfaces with vxlan type func TestDumpInterfacesVxLan(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ipv61Parse := net.ParseIP("dead:beef:feed:face:cafe:babe:baad:c0de").To16() @@ -378,7 +376,7 @@ func TestDumpInterfacesVxLan(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) intface := intfs[0] @@ -390,7 +388,7 @@ func TestDumpInterfacesVxLan(t *testing.T) { // Test dump of interfaces with host type func TestDumpInterfacesHost(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -438,7 +436,7 @@ func TestDumpInterfacesHost(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) intface := intfs[0] @@ -449,7 +447,7 @@ func TestDumpInterfacesHost(t *testing.T) { // Test dump of interfaces with memif type func TestDumpInterfacesMemif(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReplyHandler(vppMockHandler(ctx.MockVpp, []*vppReplyMock{ @@ -508,7 +506,7 @@ func TestDumpInterfacesMemif(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) intface := intfs[0] @@ -523,7 +521,7 @@ func TestDumpInterfacesMemif(t *testing.T) { // Test dump of interfaces using custom mock reply handler to avoid issues with ControlPingMessageReply // not being properly recognized func TestDumpInterfacesFull(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() hwAddr1Parse, err := net.ParseMAC("01:23:45:67:89:ab") @@ -603,7 +601,7 @@ func TestDumpInterfacesFull(t *testing.T) { }, })) - intfs, err := DumpInterfaces(logrus.DefaultLogger(), ctx.MockChannel, nil) + intfs, err := ifHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) @@ -634,7 +632,7 @@ func TestDumpInterfacesFull(t *testing.T) { // Test dump of memif socket details using standard reply mocking func TestDumpMemifSocketDetails(t *testing.T) { - ctx :=
vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifSocketFilenameDetails{ @@ -644,7 +642,7 @@ func TestDumpMemifSocketDetails(t *testing.T) { ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - result, err := DumpMemifSocketDetails(logrus.DefaultLogger(), ctx.MockChannel, nil) + result, err := ifHandler.DumpMemifSocketDetails() Expect(err).To(BeNil()) Expect(result).To(Not(BeEmpty())) diff --git a/plugins/vpp/ifplugin/vppdump/dump_nat_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls.go similarity index 60% rename from plugins/vpp/ifplugin/vppdump/dump_nat_vppcalls.go rename to plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls.go index 5e70cd627c..9c8a205454 100644 --- a/plugins/vpp/ifplugin/vppdump/dump_nat_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls import ( "bytes" @@ -21,34 +21,27 @@ import ( "strings" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" bin_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/nat" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/nat" ) -// Nat44GlobalConfigDump returns global config in NB format -func Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChan govppapi.Channel, - stopwatch *measure.Stopwatch) (*nat.Nat44Global, error) { - log.Debug("dumping Nat44Global") - +func (handler *natVppHandler) Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44Global, error) { + handler.log.Debug("dumping Nat44Global") // Dump all necessary data to reconstruct global NAT configuration - isEnabled, err := nat44IsForwardingEnabled(log, vppChan, stopwatch) + isEnabled, err := handler.nat44IsForwardingEnabled() if err != nil { return nil, err } - natInterfaces, err := nat44InterfaceDump(swIfIndices, log, vppChan, stopwatch) + natInterfaces, err := handler.Nat44InterfaceDump(swIfIndices) if err != nil { return nil, err } - natOutputFeature, err := nat44InterfaceOutputFeatureDump(swIfIndices, log, vppChan, stopwatch) + natOutputFeature, err := handler.nat44InterfaceOutputFeatureDump(swIfIndices) if err != nil { return nil, err } - natAddressPools, err := nat44AddressDump(log, vppChan, stopwatch) + natAddressPools, err := handler.nat44AddressDump() if err != nil { return nil, err } @@ -70,7 +63,7 @@ func Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, v }) } - log.Debug("dumped Nat44Global") + handler.log.Debug("dumped Nat44Global") // Set fields return &nat.Nat44Global{ @@ -80,58 +73,52 @@ func Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, v }, nil } -// NAT44NatDump dumps all types of mappings, sorts it according to tag (DNAT label) and creates a set of DNAT configurations -func NAT44DNatDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (*nat.Nat44DNat, error) { +func (handler *natVppHandler) NAT44DNatDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44DNat, error) { // List of DNAT configs var dNatCfgs []*nat.Nat44DNat_DNatConfig - var wasErr error - log.Debug("dumping DNat") + handler.log.Debug("dumping DNat") // Static mappings - natStMappings, err :=
nat44StaticMappingDump(swIfIndices, log, vppChan, stopwatch) + natStMappings, err := handler.nat44StaticMappingDump(swIfIndices) if err != nil { - log.Errorf("Failed to dump NAT44 static mappings: %v", err) - wasErr = err + return nil, fmt.Errorf("failed to dump NAT44 static mappings: %v", err) } for tag, data := range natStMappings { - processDNatData(tag, data, &dNatCfgs, log) + handler.processDNatData(tag, data, &dNatCfgs) } // Static mappings with load balancer - natStLbMappings, err := nat44StaticMappingLbDump(log, vppChan, stopwatch) + natStLbMappings, err := handler.nat44StaticMappingLbDump() if err != nil { - log.Errorf("Failed to dump NAT44 static mappings with load balancer: %v", err) - wasErr = err + return nil, fmt.Errorf("failed to dump NAT44 static mappings with load balancer: %v", err) } for tag, data := range natStLbMappings { - processDNatData(tag, data, &dNatCfgs, log) + handler.processDNatData(tag, data, &dNatCfgs) } // Identity mappings - natIdMappings, err := nat44IdentityMappingDump(swIfIndices, log, vppChan, stopwatch) + natIdMappings, err := handler.nat44IdentityMappingDump(swIfIndices) if err != nil { - log.Errorf("Failed to dump NAT44 identity mappings: %v", err) - wasErr = err + return nil, fmt.Errorf("failed to dump NAT44 identity mappings: %v", err) } for tag, data := range natIdMappings { - processDNatData(tag, data, &dNatCfgs, log) + handler.processDNatData(tag, data, &dNatCfgs) } - log.Debugf("dumped %d NAT44DNat configs", len(dNatCfgs)) + handler.log.Debugf("dumped %d NAT44DNat configs", len(dNatCfgs)) return &nat.Nat44DNat{ DnatConfigs: dNatCfgs, - }, wasErr + }, nil } // nat44AddressDump returns a list of NAT44 address pools configured in the VPP -func nat44AddressDump(log logging.Logger, vppChan govppapi.Channel, - stopwatch *measure.Stopwatch) (addresses []*nat.Nat44Global_AddressPool, err error) { +func (handler *natVppHandler) nat44AddressDump() (addresses []*nat.Nat44Global_AddressPool, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bin_api.Nat44AddressDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bin_api.Nat44AddressDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bin_api.Nat44AddressDump{} - reqContext := vppChan.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &bin_api.Nat44AddressDetails{} @@ -152,21 +139,20 @@ func nat44AddressDump(log logging.Logger, vppChan govppapi.Channel, }) } - log.Debugf("NAT44 address pool dump complete, found %d entries", len(addresses)) + handler.log.Debugf("NAT44 address pool dump complete, found %d entries", len(addresses)) return } // nat44StaticMappingDump returns a map of static mapping tag/data pairs -func nat44StaticMappingDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChan govppapi.Channel, - stopwatch *measure.Stopwatch) (entries map[string]*nat.Nat44DNat_DNatConfig_StaticMapping, err error) { +func (handler *natVppHandler) nat44StaticMappingDump(swIfIndices ifaceidx.SwIfIndex) (entries map[string]*nat.Nat44DNat_DNatConfig_StaticMapping, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bin_api.Nat44StaticMappingDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bin_api.Nat44StaticMappingDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) entries = make(map[string]*nat.Nat44DNat_DNatConfig_StaticMapping) req := &bin_api.Nat44StaticMappingDump{} - reqContext := vppChan.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &bin_api.Nat44StaticMappingDetails{} 
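Every dump routine converted in this file shares one wire pattern: send a single multi-request on the dump channel, then call ReceiveReply in a loop until the stop flag signals that VPP has delivered the last details message. A minimal, self-contained sketch of that loop, assuming only govpp's Channel interface and the binapi messages already imported above (the standalone function and the discarded time.Since stand in for a full handler with its stopwatch):

    package vppcalls

    import (
        "time"

        govppapi "git.fd.io/govpp.git/api"
        bin_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/nat"
    )

    // dumpNat44AddressesSketch is an illustrative stand-in for the dump
    // methods in this file: one SendMultiRequest, then ReceiveReply until
    // stop reports that all replies have arrived.
    func dumpNat44AddressesSketch(ch govppapi.Channel) ([]*bin_api.Nat44AddressDetails, error) {
        defer func(t time.Time) {
            _ = time.Since(t) // the real methods log this via handler.stopwatch
        }(time.Now())

        var details []*bin_api.Nat44AddressDetails
        reqCtx := ch.SendMultiRequest(&bin_api.Nat44AddressDump{})
        for {
            msg := &bin_api.Nat44AddressDetails{}
            stop, err := reqCtx.ReceiveReply(msg)
            if stop {
                break // no more replies from VPP
            }
            if err != nil {
                return nil, err
            }
            details = append(details, msg)
        }
        return details, nil
    }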
@@ -190,7 +176,7 @@ func nat44StaticMappingDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, ExternalInterface: func(ifIdx uint32) string { ifName, _, found := swIfIndices.LookupName(ifIdx) if !found && ifIdx != 0xffffffff { - log.Warnf("Interface with index %v not found in the mapping", ifIdx) + handler.log.Warnf("Interface with index %v not found in the mapping", ifIdx) } return ifName }(msg.ExternalSwIfIndex), @@ -200,26 +186,25 @@ func nat44StaticMappingDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, LocalIp: lcIPAddress.To4().String(), LocalPort: uint32(msg.LocalPort), }), - Protocol: getNatProtocol(msg.Protocol, log), - TwiceNat: getTwiceNatMode(msg.TwiceNat, msg.SelfTwiceNat, log), + Protocol: handler.getNatProtocol(msg.Protocol), + TwiceNat: handler.getTwiceNatMode(msg.TwiceNat, msg.SelfTwiceNat), } } - log.Debugf("NAT44 static mapping dump complete, found %d entries", len(entries)) + handler.log.Debugf("NAT44 static mapping dump complete, found %d entries", len(entries)) return entries, nil } // nat44StaticMappingLbDump returns a map of static mapping tag/data pairs with load balancer -func nat44StaticMappingLbDump(log logging.Logger, vppChan govppapi.Channel, - stopwatch *measure.Stopwatch) (entries map[string]*nat.Nat44DNat_DNatConfig_StaticMapping, err error) { +func (handler *natVppHandler) nat44StaticMappingLbDump() (entries map[string]*nat.Nat44DNat_DNatConfig_StaticMapping, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bin_api.Nat44LbStaticMappingDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bin_api.Nat44LbStaticMappingDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) entries = make(map[string]*nat.Nat44DNat_DNatConfig_StaticMapping) req := &bin_api.Nat44LbStaticMappingDump{} - reqContext := vppChan.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &bin_api.Nat44LbStaticMappingDetails{} @@ -251,26 +236,25 @@ func nat44StaticMappingLbDump(log logging.Logger, vppChan govppapi.Channel, ExternalIp: exIPAddress.To4().String(), ExternalPort: uint32(msg.ExternalPort), LocalIps: locals, - Protocol: getNatProtocol(msg.Protocol, log), - TwiceNat: getTwiceNatMode(msg.TwiceNat, msg.SelfTwiceNat, log), + Protocol: handler.getNatProtocol(msg.Protocol), + TwiceNat: handler.getTwiceNatMode(msg.TwiceNat, msg.SelfTwiceNat), } } - log.Debugf("NAT44 lb-static mapping dump complete, found %d entries", len(entries)) + handler.log.Debugf("NAT44 lb-static mapping dump complete, found %d entries", len(entries)) return entries, nil } // nat44IdentityMappingDump returns a map of identity mapping tag/data pairs -func nat44IdentityMappingDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChan govppapi.Channel, - stopwatch *measure.Stopwatch) (entries map[string]*nat.Nat44DNat_DNatConfig_IdentityMapping, err error) { +func (handler *natVppHandler) nat44IdentityMappingDump(swIfIndices ifaceidx.SwIfIndex) (entries map[string]*nat.Nat44DNat_DNatConfig_IdentityMapping, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bin_api.Nat44IdentityMappingDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bin_api.Nat44IdentityMappingDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) entries = make(map[string]*nat.Nat44DNat_DNatConfig_IdentityMapping) req := &bin_api.Nat44IdentityMappingDump{} - reqContext := vppChan.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &bin_api.Nat44IdentityMappingDetails{} @@ -293,30 +277,28 @@ func 
nat44IdentityMappingDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger AddressedInterface: func(ifIdx uint32) string { ifName, _, found := swIfIndices.LookupName(ifIdx) if !found && ifIdx != 0xffffffff { - log.Warnf("Interface with index %v not found in the mapping", ifIdx) + handler.log.Warnf("Interface with index %v not found in the mapping", ifIdx) } return ifName }(msg.SwIfIndex), IpAddress: ipAddress.To4().String(), Port: uint32(msg.Port), - Protocol: getNatProtocol(msg.Protocol, log), + Protocol: handler.getNatProtocol(msg.Protocol), } } - log.Debugf("NAT44 identity mapping dump complete, found %d entries", len(entries)) + handler.log.Debugf("NAT44 identity mapping dump complete, found %d entries", len(entries)) return entries, nil } -// nat44InterfaceDump returns a list of interfaces enabled for NAT44 -func nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChan govppapi.Channel, - stopwatch *measure.Stopwatch) (interfaces []*nat.Nat44Global_NatInterface, err error) { +func (handler *natVppHandler) Nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex) (interfaces []*nat.Nat44Global_NatInterface, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bin_api.Nat44InterfaceDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bin_api.Nat44InterfaceDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bin_api.Nat44InterfaceDump{} - reqContext := vppChan.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &bin_api.Nat44InterfaceDetails{} @@ -331,7 +313,7 @@ func nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppC // Find interface name ifName, _, found := swIfIndices.LookupName(msg.SwIfIndex) if !found { - log.Warnf("Interface with index %d not found in the mapping", msg.SwIfIndex) + handler.log.Warnf("Interface with index %d not found in the mapping", msg.SwIfIndex) continue } @@ -349,20 +331,19 @@ func nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppC } } - log.Debugf("NAT44 interface dump complete, found %d entries", len(interfaces)) + handler.log.Debugf("NAT44 interface dump complete, found %d entries", len(interfaces)) return } // nat44InterfaceOutputFeatureDump returns a list of interfaces with output feature set -func nat44InterfaceOutputFeatureDump(swIfIndices ifaceidx.SwIfIndex, log logging.Logger, - vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (ifaces []*nat.Nat44Global_NatInterface, err error) { +func (handler *natVppHandler) nat44InterfaceOutputFeatureDump(swIfIndices ifaceidx.SwIfIndex) (ifaces []*nat.Nat44Global_NatInterface, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bin_api.Nat44InterfaceOutputFeatureDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bin_api.Nat44InterfaceOutputFeatureDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bin_api.Nat44InterfaceOutputFeatureDump{} - reqContext := vppChan.SendMultiRequest(req) + reqContext := handler.dumpChannel.SendMultiRequest(req) for { msg := &bin_api.Nat44InterfaceOutputFeatureDetails{} @@ -377,7 +358,7 @@ func nat44InterfaceOutputFeatureDump(swIfIndices ifaceidx.SwIfIndex, log logging // Find interface name ifName, _, found := swIfIndices.LookupName(msg.SwIfIndex) if !found { - log.Warnf("Interface with index %d not found in the mapping", msg.SwIfIndex) + handler.log.Warnf("Interface with index %d not found in the mapping", msg.SwIfIndex) continue } @@ -388,37 +369,37 @@ func nat44InterfaceOutputFeatureDump(swIfIndices ifaceidx.SwIfIndex, log 
logging }) } - log.Debugf("NAT44 interface with output feature dump complete, found %d entries", len(ifaces)) + handler.log.Debugf("NAT44 interface with output feature dump complete, found %d entries", len(ifaces)) return ifaces, nil } // nat44IsForwardingEnabled returns whether NAT44 forwarding is enabled -func nat44IsForwardingEnabled(log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (isEnabled bool, err error) { +func (handler *natVppHandler) nat44IsForwardingEnabled() (isEnabled bool, err error) { defer func(t time.Time) { - stopwatch.TimeLog(bin_api.Nat44ForwardingIsEnabled{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(bin_api.Nat44ForwardingIsEnabled{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &bin_api.Nat44ForwardingIsEnabled{} reply := &bin_api.Nat44ForwardingIsEnabledReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.dumpChannel.SendRequest(req).ReceiveReply(reply); err != nil { return false, fmt.Errorf("failed to dump forwarding: %v", err) } isEnabled = uintToBool(reply.Enabled) - log.Debugf("NAT44 forwarding dump complete, is enabled: %v", isEnabled) + handler.log.Debugf("NAT44 forwarding dump complete, is enabled: %v", isEnabled) return isEnabled, nil } // Common function can process all static and identity mappings -func processDNatData(tag string, data interface{}, dNatCfgs *[]*nat.Nat44DNat_DNatConfig, log logging.Logger) { +func (handler *natVppHandler) processDNatData(tag string, data interface{}, dNatCfgs *[]*nat.Nat44DNat_DNatConfig) { if tag == "" { - log.Errorf("Cannot process DNAT config without tag") + handler.log.Errorf("Cannot process DNAT config without tag") return } - label := getDnatLabel(tag, log) + label := handler.getDnatLabel(tag) // Look for DNAT config using tag var dNat *nat.Nat44DNat_DNatConfig @@ -436,39 +417,39 @@ func processDNatData(tag string, data interface{}, dNatCfgs *[]*nat.Nat44DNat_DN IdMappings: make([]*nat.Nat44DNat_DNatConfig_IdentityMapping, 0), } *dNatCfgs = append(*dNatCfgs, dNat) - log.Debugf("Created new DNAT configuration %s", label) + handler.log.Debugf("Created new DNAT configuration %s", label) } // Add data to config switch mapping := data.(type) { case *nat.Nat44DNat_DNatConfig_StaticMapping: - log.Debugf("Static mapping added to DNAT %s", label) + handler.log.Debugf("Static mapping added to DNAT %s", label) dNat.StMappings = append(dNat.StMappings, mapping) case *nat.Nat44DNat_DNatConfig_IdentityMapping: - log.Debugf("Identity mapping added to DNAT %s", label) + handler.log.Debugf("Identity mapping added to DNAT %s", label) dNat.IdMappings = append(dNat.IdMappings, mapping) } } // returns NAT numeric representation of provided protocol value -func getNatProtocol(protocol uint8, log logging.Logger) (proto nat.Protocol) { +func (handler *natVppHandler) getNatProtocol(protocol uint8) (proto nat.Protocol) { switch protocol { - case vppcalls.TCP: + case TCP: return nat.Protocol_TCP - case vppcalls.UDP: + case UDP: return nat.Protocol_UDP - case vppcalls.ICMP: + case ICMP: return nat.Protocol_ICMP default: - log.Warnf("Unknown protocol %v", protocol) + handler.log.Warnf("Unknown protocol %v", protocol) return 0 } } -func getTwiceNatMode(twiceNat, selfTwiceNat uint8, log logging.Logger) nat.TwiceNatMode { +func (handler *natVppHandler) getTwiceNatMode(twiceNat, selfTwiceNat uint8) nat.TwiceNatMode { if twiceNat > 0 { if selfTwiceNat > 0 { - log.Warnf("Both TwiceNAT and
self-TwiceNAT are enabled") return 0 } return nat.TwiceNatMode_ENABLED @@ -487,15 +468,15 @@ func uintToBool(value uint8) bool { } // Obtain DNAT label from provided tag -func getDnatLabel(tag string, log logging.Logger) (label string) { +func (handler *natVppHandler) getDnatLabel(tag string) (label string) { parts := strings.Split(tag, "|") // Tag should be in format label|mappingType|index if len(parts) == 0 { - log.Errorf("Unable to obtain DNAT label, incorrect mapping tag format: '%s'", tag) + handler.log.Errorf("Unable to obtain DNAT label, incorrect mapping tag format: '%s'", tag) return } if len(parts) != 3 { - log.Warnf("Mapping tag has unexpected format: %s. Resolved DNAT label may not be correct", tag) + handler.log.Warnf("Mapping tag has unexpected format: %s. Resolved DNAT label may not be correct", tag) } return parts[0] } diff --git a/plugins/vpp/ifplugin/vppdump/dump_nat_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go similarity index 78% rename from plugins/vpp/ifplugin/vppdump/dump_nat_vppcalls_test.go rename to plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go index 11ad653890..c3ec6adfb1 100644 --- a/plugins/vpp/ifplugin/vppdump/dump_nat_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls_test import ( "testing" @@ -24,11 +24,13 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/tests/vppcallmock" + "github.com/ligato/cn-infra/logging/measure" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" . "github.com/onsi/gomega" ) func TestNat44InterfaceDump(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bin_api.Nat44InterfaceDetails{ @@ -40,14 +42,14 @@ func TestNat44InterfaceDump(t *testing.T) { swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, nil) - ifaces, err := nat44InterfaceDump(swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + ifaces, err := natHandler.Nat44InterfaceDump(swIfIndexes) Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(1)) Expect(ifaces[0].IsInside).To(BeFalse()) } func TestNat44InterfaceDump2(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bin_api.Nat44InterfaceDetails{ @@ -59,14 +61,14 @@ func TestNat44InterfaceDump2(t *testing.T) { swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, nil) - ifaces, err := nat44InterfaceDump(swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + ifaces, err := natHandler.Nat44InterfaceDump(swIfIndexes) Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(1)) Expect(ifaces[0].IsInside).To(BeTrue()) } func TestNat44InterfaceDump3(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bin_api.Nat44InterfaceDetails{ @@ -78,9 +80,16 @@ func TestNat44InterfaceDump3(t *testing.T) { swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, 
nil) - ifaces, err := nat44InterfaceDump(swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + ifaces, err := natHandler.Nat44InterfaceDump(swIfIndexes) Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(2)) Expect(ifaces[0].IsInside).To(BeFalse()) Expect(ifaces[1].IsInside).To(BeTrue()) } + +func natTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.NatVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + natHandler := vppcalls.NewNatVppHandler(ctx.MockChannel, ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + return ctx, natHandler +} diff --git a/plugins/vpp/ifplugin/vppdump/dump_stn_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_stn_vppcalls.go similarity index 70% rename from plugins/vpp/ifplugin/vppdump/dump_stn_vppcalls.go rename to plugins/vpp/ifplugin/vppcalls/dump_stn_vppcalls.go index 7a9a2e8ae4..a340414278 100644 --- a/plugins/vpp/ifplugin/vppdump/dump_stn_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_stn_vppcalls.go @@ -12,24 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls import ( "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" ) -// DumpStnRules returns a list of all STN rules configured on the VPP -func DumpStnRules(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (rules []*stn.StnRulesDetails, err error) { +func (handler *stnVppHandler) DumpStnRules() (rules []*stn.StnRulesDetails, err error) { defer func(t time.Time) { - stopwatch.TimeLog(stn.StnRulesDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(stn.StnRulesDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &stn.StnRulesDump{} - reqCtx := vppChan.SendMultiRequest(req) + reqCtx := handler.callsChannel.SendMultiRequest(req) for { msg := &stn.StnRulesDetails{} stop, err := reqCtx.ReceiveReply(msg) diff --git a/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go index a94b1c5df1..c4991b5880 100644 --- a/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go @@ -29,9 +29,9 @@ const ( removeContainerIP uint8 = 0 ) -func sendAndLogMessageForVpp(ifIdx uint32, addr string, isAdd uint8, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) sendAndLogMessageForVpp(ifIdx uint32, addr string, isAdd uint8) error { defer func(t time.Time) { - stopwatch.TimeLog(ip.IPContainerProxyAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ip.IPContainerProxyAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ip.IPContainerProxyAddDel{ @@ -55,7 +55,7 @@ func sendAndLogMessageForVpp(ifIdx uint32, addr string, isAdd uint8, vppChan gov } reply := &ip.IPContainerProxyAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -65,12 +65,10 @@ func sendAndLogMessageForVpp(ifIdx uint32, addr string, isAdd uint8, vppChan gov return nil } -// AddContainerIP calls IPContainerProxyAddDel VPP API with IsAdd=1 -func AddContainerIP(ifIdx uint32, addr string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return sendAndLogMessageForVpp(ifIdx, addr, addContainerIP, vppChan, stopwatch) +func (handler *ifVppHandler) 
AddContainerIP(ifIdx uint32, addr string) error { + return handler.sendAndLogMessageForVpp(ifIdx, addr, addContainerIP) } -// DelContainerIP calls IPContainerProxyAddDel VPP API with IsAdd=0 -func DelContainerIP(ifIdx uint32, addr string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return sendAndLogMessageForVpp(ifIdx, addr, removeContainerIP, vppChan, stopwatch) +func (handler *ifVppHandler) DelContainerIP(ifIdx uint32, addr string) error { + return handler.sendAndLogMessageForVpp(ifIdx, addr, removeContainerIP) } diff --git a/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls_test.go index 4ebacc4ecc..4f70ac6e2a 100644 --- a/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls_test.go @@ -19,18 +19,16 @@ import ( "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestAddContainerIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPContainerProxyAddDelReply{}) - err := vppcalls.AddContainerIP(1, "10.0.0.1/24", ctx.MockChannel, nil) + err := ifHandler.AddContainerIP(1, "10.0.0.1/24") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*ip.IPContainerProxyAddDel) @@ -43,12 +41,12 @@ func TestAddContainerIP(t *testing.T) { } func TestAddContainerIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPContainerProxyAddDelReply{}) - err := vppcalls.AddContainerIP(1, "2001:db8:0:1:1:1:1:1/128", ctx.MockChannel, nil) + err := ifHandler.AddContainerIP(1, "2001:db8:0:1:1:1:1:1/128") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*ip.IPContainerProxyAddDel) @@ -61,47 +59,47 @@ func TestAddContainerIPv6(t *testing.T) { } func TestAddContainerIPInvalidIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPAddressDetails{}) - err := vppcalls.AddContainerIP(1, "invalid-ip", ctx.MockChannel, nil) + err := ifHandler.AddContainerIP(1, "invalid-ip") Expect(err).ToNot(BeNil()) } func TestAddContainerIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPAddressDetails{}) - err := vppcalls.AddContainerIP(1, "10.0.0.1/24", ctx.MockChannel, nil) + err := ifHandler.AddContainerIP(1, "10.0.0.1/24") Expect(err).ToNot(BeNil()) } func TestAddContainerIPRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPContainerProxyAddDelReply{ Retval: 1, }) - err := vppcalls.AddContainerIP(1, "10.0.0.1/24", ctx.MockChannel, nil) + err := ifHandler.AddContainerIP(1, "10.0.0.1/24") Expect(err).ToNot(BeNil()) } func TestDelContainerIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPContainerProxyAddDelReply{}) - err := vppcalls.DelContainerIP(1, "10.0.0.1/24", ctx.MockChannel, nil) + err := ifHandler.DelContainerIP(1, "10.0.0.1/24") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*ip.IPContainerProxyAddDel) @@ -114,12 +112,12 @@ func 
TestDelContainerIP(t *testing.T) { } func TestDelContainerIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPContainerProxyAddDelReply{}) - err := vppcalls.DelContainerIP(1, "2001:db8:0:1:1:1:1:1/128", ctx.MockChannel, nil) + err := ifHandler.DelContainerIP(1, "2001:db8:0:1:1:1:1:1/128") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*ip.IPContainerProxyAddDel) @@ -132,25 +130,25 @@ func TestDelContainerIPv6(t *testing.T) { } func TestDelContainerIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPAddressDetails{}) - err := vppcalls.DelContainerIP(1, "10.0.0.1/24", ctx.MockChannel, nil) + err := ifHandler.DelContainerIP(1, "10.0.0.1/24") Expect(err).ToNot(BeNil()) } func TestDelContainerIPRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPContainerProxyAddDelReply{ Retval: 1, }) - err := vppcalls.DelContainerIP(1, "10.0.0.1/24", ctx.MockChannel, nil) + err := ifHandler.DelContainerIP(1, "10.0.0.1/24") Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/ip_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/ip_vppcalls.go index 84c2e6e999..04031a1611 100644 --- a/plugins/vpp/ifplugin/vppcalls/ip_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/ip_vppcalls.go @@ -19,8 +19,6 @@ import ( "net" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" ) @@ -30,9 +28,9 @@ const ( delInterfaceIP uint8 = 0 ) -func addDelInterfaceIP(ifIdx uint32, addr *net.IPNet, isAdd uint8, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) addDelInterfaceIP(ifIdx uint32, addr *net.IPNet, isAdd uint8) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.SwInterfaceAddDelAddress{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.SwInterfaceAddDelAddress{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &interfaces.SwInterfaceAddDelAddress{ @@ -56,7 +54,7 @@ func addDelInterfaceIP(ifIdx uint32, addr *net.IPNet, isAdd uint8, vppChan govpp } reply := &interfaces.SwInterfaceAddDelAddressReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -66,14 +64,12 @@ func addDelInterfaceIP(ifIdx uint32, addr *net.IPNet, isAdd uint8, vppChan govpp return nil } -// AddInterfaceIP calls SwInterfaceAddDelAddress bin API with IsAdd=1. -func AddInterfaceIP(ifIdx uint32, addr *net.IPNet, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return addDelInterfaceIP(ifIdx, addr, addInterfaceIP, vppChan, stopwatch) +func (handler *ifVppHandler) AddInterfaceIP(ifIdx uint32, addr *net.IPNet) error { + return handler.addDelInterfaceIP(ifIdx, addr, addInterfaceIP) } -// DelInterfaceIP calls SwInterfaceAddDelAddress bin API with IsAdd=00. 
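The interface-IP functions being rewritten here show the patch's other recurring shape: a single unexported helper parameterized by an isAdd flag, with the exported Add/Del methods reduced to one-line wrappers. A condensed sketch of that shape (the address handling of the real code is omitted, ifVppHandlerSketch stands in for the patch's ifVppHandler, and binapi field names are assumed from the generated bindings):

    package vppcalls

    import (
        "fmt"

        govppapi "git.fd.io/govpp.git/api"
        "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces"
    )

    // ifVppHandlerSketch carries only the calls channel needed for the demo.
    type ifVppHandlerSketch struct {
        callsChannel govppapi.Channel
    }

    // addDelInterfaceIP does the actual request; isAdd selects add vs. delete.
    func (h *ifVppHandlerSketch) addDelInterfaceIP(ifIdx uint32, isAdd uint8) error {
        req := &interfaces.SwInterfaceAddDelAddress{
            SwIfIndex: ifIdx,
            IsAdd:     isAdd,
        }
        reply := &interfaces.SwInterfaceAddDelAddressReply{}
        if err := h.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil {
            return err
        }
        if reply.Retval != 0 {
            return fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval)
        }
        return nil
    }

    // The exported API reduces to thin, self-documenting wrappers.
    func (h *ifVppHandlerSketch) AddInterfaceIP(ifIdx uint32) error {
        return h.addDelInterfaceIP(ifIdx, 1)
    }

    func (h *ifVppHandlerSketch) DelInterfaceIP(ifIdx uint32) error {
        return h.addDelInterfaceIP(ifIdx, 0)
    }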
-func DelInterfaceIP(ifIdx uint32, addr *net.IPNet, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return addDelInterfaceIP(ifIdx, addr, delInterfaceIP, vppChan, stopwatch) +func (handler *ifVppHandler) DelInterfaceIP(ifIdx uint32, addr *net.IPNet) error { + return handler.addDelInterfaceIP(ifIdx, addr, delInterfaceIP) } const ( @@ -81,9 +77,9 @@ const ( unsetUnnumberedIP uint8 = 0 ) -func setUnsetUnnumberedIP(uIfIdx uint32, ifIdxWithIP uint32, isAdd uint8, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) setUnsetUnnumberedIP(uIfIdx uint32, ifIdxWithIP uint32, isAdd uint8) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.SwInterfaceSetUnnumbered{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.SwInterfaceSetUnnumbered{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare the message. @@ -94,7 +90,7 @@ func setUnsetUnnumberedIP(uIfIdx uint32, ifIdxWithIP uint32, isAdd uint8, vppCha } reply := &interfaces.SwInterfaceSetUnnumberedReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -104,12 +100,10 @@ func setUnsetUnnumberedIP(uIfIdx uint32, ifIdxWithIP uint32, isAdd uint8, vppCha return nil } -// SetUnnumberedIP sets interface as un-numbered, linking IP address of the another interface (ifIdxWithIP) -func SetUnnumberedIP(uIfIdx uint32, ifIdxWithIP uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return setUnsetUnnumberedIP(uIfIdx, ifIdxWithIP, setUnnumberedIP, vppChan, stopwatch) +func (handler *ifVppHandler) SetUnnumberedIP(uIfIdx uint32, ifIdxWithIP uint32) error { + return handler.setUnsetUnnumberedIP(uIfIdx, ifIdxWithIP, setUnnumberedIP) } -// UnsetUnnumberedIP unset provided interface as un-numbered. IP address of the linked interface is removed -func UnsetUnnumberedIP(uIfIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return setUnsetUnnumberedIP(uIfIdx, 0, unsetUnnumberedIP, vppChan, stopwatch) +func (handler *ifVppHandler) UnsetUnnumberedIP(uIfIdx uint32) error { + return handler.setUnsetUnnumberedIP(uIfIdx, 0, unsetUnnumberedIP) } diff --git a/plugins/vpp/ifplugin/vppcalls/ip_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/ip_vppcalls_test.go index 7c4b9cb8d1..50c6469274 100644 --- a/plugins/vpp/ifplugin/vppcalls/ip_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/ip_vppcalls_test.go @@ -19,20 +19,18 @@ import ( "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) func TestAddInterfaceIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddressReply{}) _, ipNet, err := net.ParseCIDR("10.0.0.1/24") Expect(err).To(BeNil()) - err = vppcalls.AddInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.AddInterfaceIP(1, ipNet) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceAddDelAddress) @@ -46,14 +44,14 @@ func TestAddInterfaceIP(t *testing.T) { } func TestAddInterfaceIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddressReply{}) _, ipNet, err := net.ParseCIDR("2001:db8:0:1:1:1:1:1/128") Expect(err).To(BeNil()) - err = vppcalls.AddInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.AddInterfaceIP(1, ipNet) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceAddDelAddress) @@ -67,32 +65,32 @@ func TestAddInterfaceIPv6(t *testing.T) { } func TestAddInterfaceInvalidIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddressReply{}) - err := vppcalls.AddInterfaceIP(1, &net.IPNet{ + err := ifHandler.AddInterfaceIP(1, &net.IPNet{ IP: []byte("invalid-ip"), - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestAddInterfaceIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() _, ipNet, err := net.ParseCIDR("10.0.0.1/24") ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddress{}) - err = vppcalls.AddInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.AddInterfaceIP(1, ipNet) Expect(err).ToNot(BeNil()) } func TestAddInterfaceIPRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() _, ipNet, err := net.ParseCIDR("10.0.0.1/24") @@ -100,20 +98,20 @@ func TestAddInterfaceIPRetval(t *testing.T) { Retval: 1, }) - err = vppcalls.AddInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.AddInterfaceIP(1, ipNet) Expect(err).ToNot(BeNil()) } func TestDelInterfaceIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddressReply{}) _, ipNet, err := net.ParseCIDR("10.0.0.1/24") Expect(err).To(BeNil()) - err = vppcalls.DelInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.DelInterfaceIP(1, ipNet) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceAddDelAddress) @@ -127,14 +125,14 @@ func TestDelInterfaceIP(t *testing.T) { } func TestDelInterfaceIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddressReply{}) _, ipNet, err := net.ParseCIDR("2001:db8:0:1:1:1:1:1/128") Expect(err).To(BeNil()) - err = vppcalls.DelInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.DelInterfaceIP(1, ipNet) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceAddDelAddress) @@ -148,32 +146,32 @@ func TestDelInterfaceIPv6(t *testing.T) { } func TestDelInterfaceInvalidIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) 
defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddressReply{}) - err := vppcalls.DelInterfaceIP(1, &net.IPNet{ + err := ifHandler.DelInterfaceIP(1, &net.IPNet{ IP: []byte("invalid-ip"), - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDelInterfaceIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() _, ipNet, err := net.ParseCIDR("10.0.0.1/24") ctx.MockVpp.MockReply(&interfaces.SwInterfaceAddDelAddress{}) - err = vppcalls.DelInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.DelInterfaceIP(1, ipNet) Expect(err).ToNot(BeNil()) } func TestDelInterfaceIPRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() _, ipNet, err := net.ParseCIDR("10.0.0.1/24") @@ -181,18 +179,18 @@ func TestDelInterfaceIPRetval(t *testing.T) { Retval: 1, }) - err = vppcalls.DelInterfaceIP(1, ipNet, ctx.MockChannel, nil) + err = ifHandler.DelInterfaceIP(1, ipNet) Expect(err).ToNot(BeNil()) } func TestSetUnnumberedIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetUnnumberedReply{}) - err := vppcalls.SetUnnumberedIP(1, 2, ctx.MockChannel, nil) + err := ifHandler.SetUnnumberedIP(1, 2) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceSetUnnumbered) @@ -203,36 +201,36 @@ func TestSetUnnumberedIP(t *testing.T) { } func TestSetUnnumberedIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetUnnumbered{}) - err := vppcalls.SetUnnumberedIP(1, 2, ctx.MockChannel, nil) + err := ifHandler.SetUnnumberedIP(1, 2) Expect(err).ToNot(BeNil()) } func TestSetUnnumberedIPRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetUnnumberedReply{ Retval: 1, }) - err := vppcalls.SetUnnumberedIP(1, 2, ctx.MockChannel, nil) + err := ifHandler.SetUnnumberedIP(1, 2) Expect(err).ToNot(BeNil()) } func TestUnsetUnnumberedIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetUnnumberedReply{}) - err := vppcalls.UnsetUnnumberedIP(1, ctx.MockChannel, nil) + err := ifHandler.UnsetUnnumberedIP(1) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceSetUnnumbered) @@ -243,25 +241,25 @@ func TestUnsetUnnumberedIP(t *testing.T) { } func TestUnsetUnnumberedIPError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetUnnumbered{}) - err := vppcalls.UnsetUnnumberedIP(1, ctx.MockChannel, nil) + err := ifHandler.UnsetUnnumberedIP(1) Expect(err).ToNot(BeNil()) } func TestUnsetUnnumberedIPRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetUnnumberedReply{ Retval: 1, }) - err := vppcalls.UnsetUnnumberedIP(1, ctx.MockChannel, nil) + err := ifHandler.UnsetUnnumberedIP(1) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls.go index 
bee6f63e4f..d6251f3899 100644 --- a/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls.go @@ -18,34 +18,30 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" ) -// AddLoopbackInterface calls CreateLoopback bin API. -func AddLoopbackInterface(ifName string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (swIndex uint32, err error) { +func (handler *ifVppHandler) AddLoopbackInterface(ifName string) (swIndex uint32, err error) { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.CreateLoopback{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.CreateLoopback{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &interfaces.CreateLoopback{} reply := &interfaces.CreateLoopbackReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, err } if reply.Retval != 0 { return 0, fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - return reply.SwIfIndex, SetInterfaceTag(ifName, reply.SwIfIndex, vppChan, stopwatch) + return reply.SwIfIndex, handler.SetInterfaceTag(ifName, reply.SwIfIndex) } -// DeleteLoopbackInterface calls DeleteLoopback bin API. -func DeleteLoopbackInterface(ifName string, idx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) DeleteLoopbackInterface(ifName string, idx uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.DeleteLoopback{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.DeleteLoopback{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Prepare the message. @@ -54,12 +50,12 @@ func DeleteLoopbackInterface(ifName string, idx uint32, vppChan govppapi.Channel } reply := &interfaces.DeleteLoopbackReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - return RemoveInterfaceTag(ifName, idx, vppChan, stopwatch) + return handler.RemoveInterfaceTag(ifName, idx) } diff --git a/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls_test.go index 79f72120c0..60161cddc0 100644 --- a/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/loopback_vppcalls_test.go @@ -18,13 +18,11 @@ import ( "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) func TestAddLoopbackInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.CreateLoopbackReply{ @@ -32,70 +30,70 @@ func TestAddLoopbackInterface(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := vppcalls.AddLoopbackInterface("loopback", ctx.MockChannel, nil) + swIfIdx, err := ifHandler.AddLoopbackInterface("loopback") Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) } func TestAddLoopbackInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.CreateLoopback{}) - swIfIdx, err := vppcalls.AddLoopbackInterface("loopback", ctx.MockChannel, nil) + swIfIdx, err := ifHandler.AddLoopbackInterface("loopback") Expect(err).ToNot(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(0)) } func TestAddLoopbackInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.CreateLoopbackReply{ Retval: 1, }) - swIfIdx, err := vppcalls.AddLoopbackInterface("loopback", ctx.MockChannel, nil) + swIfIdx, err := ifHandler.AddLoopbackInterface("loopback") Expect(err).ToNot(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(0)) } func TestDeleteLoopbackInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.DeleteLoopbackReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteLoopbackInterface("loopback", 1, ctx.MockChannel, nil) + err := ifHandler.DeleteLoopbackInterface("loopback", 1) Expect(err).To(BeNil()) } func TestDeleteLoopbackInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.DeleteLoopback{}) - err := vppcalls.DeleteLoopbackInterface("loopback", 1, ctx.MockChannel, nil) + err := ifHandler.DeleteLoopbackInterface("loopback", 1) Expect(err).ToNot(BeNil()) } func TestDeleteLoopbackInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.DeleteLoopbackReply{ Retval: 1, }) - err := vppcalls.DeleteLoopbackInterface("loopback", 1, ctx.MockChannel, nil) + err := ifHandler.DeleteLoopbackInterface("loopback", 1) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/mac_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/mac_vppcalls.go index 229a62ecda..125bf6e3c4 100644 --- a/plugins/vpp/ifplugin/vppcalls/mac_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/mac_vppcalls.go @@ -19,15 +19,12 @@ import ( "net" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" ) -// SetInterfaceMac calls SwInterfaceSetMacAddress bin API. 
-func SetInterfaceMac(ifIdx uint32, macAddress string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) SetInterfaceMac(ifIdx uint32, macAddress string) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.SwInterfaceSetMacAddress{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.SwInterfaceSetMacAddress{}).LogTimeEntry(time.Since(t)) }(time.Now()) mac, err := net.ParseMAC(macAddress) @@ -41,7 +38,7 @@ func SetInterfaceMac(ifIdx uint32, macAddress string, vppChan govppapi.Channel, } reply := &interfaces.SwInterfaceSetMacAddressReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/ifplugin/vppcalls/mac_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/mac_vppcalls_test.go index 21d6c92449..0e9f3a4128 100644 --- a/plugins/vpp/ifplugin/vppcalls/mac_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/mac_vppcalls_test.go @@ -12,25 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppcalls +package vppcalls_test import ( "net" "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestSetInterfaceMac(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetMacAddressReply{}) mac, _ := net.ParseMAC("65:77:BF:72:C9:8D") - err := SetInterfaceMac(1, "65:77:BF:72:C9:8D", ctx.MockChannel, nil) + err := ifHandler.SetInterfaceMac(1, "65:77:BF:72:C9:8D") Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceSetMacAddress) @@ -40,36 +39,36 @@ func TestSetInterfaceMac(t *testing.T) { } func TestSetInterfaceInvalidMac(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetMacAddress{}) - err := SetInterfaceMac(1, "invalid-mac", ctx.MockChannel, nil) + err := ifHandler.SetInterfaceMac(1, "invalid-mac") Expect(err).ToNot(BeNil()) } func TestSetInterfaceMacError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetMacAddress{}) - err := SetInterfaceMac(1, "65:77:BF:72:C9:8D", ctx.MockChannel, nil) + err := ifHandler.SetInterfaceMac(1, "65:77:BF:72:C9:8D") Expect(err).ToNot(BeNil()) } func TestSetInterfaceMacRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetMacAddressReply{ Retval: 1, }) - err := SetInterfaceMac(1, "65:77:BF:72:C9:8D", ctx.MockChannel, nil) + err := ifHandler.SetInterfaceMac(1, "65:77:BF:72:C9:8D") Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/memif_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/memif_vppcalls.go index 3c3b98c812..2016677c51 100644 --- a/plugins/vpp/ifplugin/vppcalls/memif_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/memif_vppcalls.go @@ -18,16 +18,13 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/memif" intf 
"github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) -// AddMemifInterface calls MemifCreate bin API. -func AddMemifInterface(ifName string, memIface *intf.Interfaces_Interface_Memif, socketID uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (swIdx uint32, err error) { +func (handler *ifVppHandler) AddMemifInterface(ifName string, memIface *intf.Interfaces_Interface_Memif, socketID uint32) (swIdx uint32, err error) { defer func(t time.Time) { - stopwatch.TimeLog(memif.MemifCreate{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(memif.MemifCreate{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &memif.MemifCreate{ @@ -54,20 +51,19 @@ func AddMemifInterface(ifName string, memIface *intf.Interfaces_Interface_Memif, } reply := &memif.MemifCreateReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, err } if reply.Retval != 0 { return 0, fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - return reply.SwIfIndex, SetInterfaceTag(ifName, reply.SwIfIndex, vppChan, stopwatch) + return reply.SwIfIndex, handler.SetInterfaceTag(ifName, reply.SwIfIndex) } -// DeleteMemifInterface calls MemifDelete bin API. -func DeleteMemifInterface(ifName string, idx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) DeleteMemifInterface(ifName string, idx uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(memif.MemifDelete{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(memif.MemifDelete{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &memif.MemifDelete{ @@ -75,20 +71,19 @@ func DeleteMemifInterface(ifName string, idx uint32, vppChan govppapi.Channel, s } reply := &memif.MemifDeleteReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - return RemoveInterfaceTag(ifName, idx, vppChan, stopwatch) + return handler.RemoveInterfaceTag(ifName, idx) } -// RegisterMemifSocketFilename registers new socket file name with provided ID. 
-func RegisterMemifSocketFilename(filename []byte, id uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) RegisterMemifSocketFilename(filename []byte, id uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(memif.MemifSocketFilenameAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(memif.MemifSocketFilenameAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &memif.MemifSocketFilenameAddDel{ @@ -98,7 +93,7 @@ func RegisterMemifSocketFilename(filename []byte, id uint32, vppChan govppapi.Ch } reply := &memif.MemifSocketFilenameAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/ifplugin/vppcalls/memif_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/memif_vppcalls_test.go index 6e4248d77d..963aba1a94 100644 --- a/plugins/vpp/ifplugin/vppcalls/memif_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/memif_vppcalls_test.go @@ -19,14 +19,12 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/binapi/memif" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ifModel "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestAddMasterMemifInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifCreateReply{ @@ -34,12 +32,12 @@ func TestAddMasterMemifInterface(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := vppcalls.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ + swIfIdx, err := ifHandler.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ Id: 1, Mode: ifModel.Interfaces_Interface_Memif_IP, Secret: "secret", Master: true, - }, 5, ctx.MockChannel, nil) + }, 5) Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) @@ -60,7 +58,7 @@ func TestAddMasterMemifInterface(t *testing.T) { } func TestAddMasterMemifInterfaceAsSlave(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifCreateReply{ @@ -68,12 +66,12 @@ func TestAddMasterMemifInterfaceAsSlave(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := vppcalls.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ + swIfIdx, err := ifHandler.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ Id: 1, Mode: ifModel.Interfaces_Interface_Memif_IP, Secret: "secret", Master: false, - }, 5, ctx.MockChannel, nil) + }, 5) Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) @@ -89,24 +87,24 @@ func TestAddMasterMemifInterfaceAsSlave(t *testing.T) { } func TestAddMasterMemifInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifCreate{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := vppcalls.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ + _, err := ifHandler.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ Id: 1, Mode: ifModel.Interfaces_Interface_Memif_IP, Secret: "secret", Master: false, - }, 5, ctx.MockChannel, nil) + }, 
5) Expect(err).ToNot(BeNil()) } func TestAddMasterMemifInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifCreateReply{ @@ -114,42 +112,42 @@ func TestAddMasterMemifInterfaceRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := vppcalls.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ + _, err := ifHandler.AddMemifInterface("memif", &ifModel.Interfaces_Interface_Memif{ Id: 1, Mode: ifModel.Interfaces_Interface_Memif_IP, Secret: "secret", Master: false, - }, 5, ctx.MockChannel, nil) + }, 5) Expect(err).ToNot(BeNil()) } func TestDeleteMemifInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifDeleteReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteMemifInterface("memif", 1, ctx.MockChannel, nil) + err := ifHandler.DeleteMemifInterface("memif", 1) Expect(err).To(BeNil()) } func TestDeleteMemifInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifDelete{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteMemifInterface("memif", 1, ctx.MockChannel, nil) + err := ifHandler.DeleteMemifInterface("memif", 1) Expect(err).ToNot(BeNil()) } func TestDeleteMemifInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifDeleteReply{ @@ -157,18 +155,18 @@ func TestDeleteMemifInterfaceRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteMemifInterface("memif", 1, ctx.MockChannel, nil) + err := ifHandler.DeleteMemifInterface("memif", 1) Expect(err).ToNot(BeNil()) } func TestRegisterMemifSocketFilename(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifSocketFilenameAddDelReply{}) - err := vppcalls.RegisterMemifSocketFilename([]byte("filename"), 1, ctx.MockChannel, nil) + err := ifHandler.RegisterMemifSocketFilename([]byte("filename"), 1) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*memif.MemifSocketFilenameAddDel) @@ -179,25 +177,25 @@ func TestRegisterMemifSocketFilename(t *testing.T) { } func TestRegisterMemifSocketFilenameError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifSocketFilenameAddDel{}) - err := vppcalls.RegisterMemifSocketFilename([]byte("filename"), 1, ctx.MockChannel, nil) + err := ifHandler.RegisterMemifSocketFilename([]byte("filename"), 1) Expect(err).ToNot(BeNil()) } func TestRegisterMemifSocketFilenameRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&memif.MemifSocketFilenameAddDelReply{ Retval: 1, }) - err := vppcalls.RegisterMemifSocketFilename([]byte("filename"), 1, ctx.MockChannel, nil) + err := ifHandler.RegisterMemifSocketFilename([]byte("filename"), 1) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls.go index 68f72541a7..db8b7bc2e5 100644 --- 
a/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls.go @@ -18,15 +18,12 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" ) -// SetInterfaceMtu calls HwInterfaceSetMtu bin API with desired MTU value. -func SetInterfaceMtu(ifIdx uint32, mtu uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) SetInterfaceMtu(ifIdx uint32, mtu uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.HwInterfaceSetMtu{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.HwInterfaceSetMtu{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &interfaces.HwInterfaceSetMtu{ @@ -35,7 +32,7 @@ func SetInterfaceMtu(ifIdx uint32, mtu uint32, vppChan govppapi.Channel, stopwat } reply := &interfaces.HwInterfaceSetMtuReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls_test.go index fc665d541d..8dfee7513f 100644 --- a/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/mtu_vppcalls_test.go @@ -18,18 +18,16 @@ import ( "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestSetInterfaceMtu(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.HwInterfaceSetMtuReply{}) - err := vppcalls.SetInterfaceMtu(1, 1500, ctx.MockChannel, nil) + err := ifHandler.SetInterfaceMtu(1, 1500) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.HwInterfaceSetMtu) @@ -39,25 +37,25 @@ func TestSetInterfaceMtu(t *testing.T) { } func TestSetInterfaceMtuError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.HwInterfaceSetMtu{}) - err := vppcalls.SetInterfaceMtu(1, 1500, ctx.MockChannel, nil) + err := ifHandler.SetInterfaceMtu(1, 1500) Expect(err).ToNot(BeNil()) } func TestSetInterfaceMtuRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.HwInterfaceSetMtuReply{ Retval: 1, }) - err := vppcalls.SetInterfaceMtu(1, 1500, ctx.MockChannel, nil) + err := ifHandler.SetInterfaceMtu(1, 1500) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go index 10a3c64721..ab0955d31f 100644 --- a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls.go @@ -18,8 +18,6 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/nat" ) @@ -78,10 +76,9 @@ type LocalLbAddress struct { Probability uint8 } -// SetNat44Forwarding configures global forwarding setup for NAT44 -func SetNat44Forwarding(enableFwd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) SetNat44Forwarding(enableFwd bool) error { defer 
func(t time.Time) { - stopwatch.TimeLog(nat.Nat44ForwardingEnableDisable{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(nat.Nat44ForwardingEnableDisable{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &nat.Nat44ForwardingEnableDisable{ @@ -89,7 +86,7 @@ func SetNat44Forwarding(enableFwd bool, vppChan govppapi.Channel, stopwatch *mea } reply := &nat.Nat44ForwardingEnableDisableReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -100,9 +97,9 @@ func SetNat44Forwarding(enableFwd bool, vppChan govppapi.Channel, stopwatch *mea } // Calls VPP binary API to set/unset interface as NAT -func handleNat44Interface(ifIdx uint32, isInside, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) handleNat44Interface(ifIdx uint32, isInside, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(nat.Nat44InterfaceAddDelFeature{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(nat.Nat44InterfaceAddDelFeature{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &nat.Nat44InterfaceAddDelFeature{ @@ -112,7 +109,7 @@ func handleNat44Interface(ifIdx uint32, isInside, isAdd bool, vppChan govppapi.C } reply := &nat.Nat44InterfaceAddDelFeatureReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -123,9 +120,9 @@ func handleNat44Interface(ifIdx uint32, isInside, isAdd bool, vppChan govppapi.C } // Calls VPP binary API to set/unset interface as NAT with output feature -func handleNat44InterfaceOutputFeature(ifIdx uint32, isInside, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) handleNat44InterfaceOutputFeature(ifIdx uint32, isInside, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(nat.Nat44InterfaceAddDelOutputFeature{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(nat.Nat44InterfaceAddDelOutputFeature{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &nat.Nat44InterfaceAddDelOutputFeature{ @@ -135,7 +132,7 @@ func handleNat44InterfaceOutputFeature(ifIdx uint32, isInside, isAdd bool, vppCh } reply := &nat.Nat44InterfaceAddDelOutputFeatureReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -146,9 +143,9 @@ func handleNat44InterfaceOutputFeature(ifIdx uint32, isInside, isAdd bool, vppCh } // Calls VPP binary API to add/remove address pool -func handleNat44AddressPool(first, last []byte, vrf uint32, twiceNat, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) handleNat44AddressPool(first, last []byte, vrf uint32, twiceNat, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(nat.Nat44AddDelAddressRange{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(nat.Nat44AddDelAddressRange{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &nat.Nat44AddDelAddressRange{ @@ -160,7 +157,7 @@ func handleNat44AddressPool(first, last []byte, vrf uint32, twiceNat, isAdd bool } reply := &nat.Nat44AddDelAddressRangeReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := 
handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -171,9 +168,9 @@ func handleNat44AddressPool(first, last []byte, vrf uint32, twiceNat, isAdd bool } // Calls VPP binary API to add/remove static mapping -func handleNat44StaticMapping(ctx *StaticMappingContext, isAdd, addrOnly bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) handleNat44StaticMapping(ctx *StaticMappingContext, isAdd, addrOnly bool) error { defer func(t time.Time) { - stopwatch.TimeLog(nat.Nat44AddDelStaticMapping{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(nat.Nat44AddDelStaticMapping{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &nat.Nat44AddDelStaticMapping{ @@ -198,7 +195,7 @@ func handleNat44StaticMapping(ctx *StaticMappingContext, isAdd, addrOnly bool, v } reply := &nat.Nat44AddDelStaticMappingReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -209,9 +206,9 @@ func handleNat44StaticMapping(ctx *StaticMappingContext, isAdd, addrOnly bool, v } // Calls VPP binary API to add/remove static mapping with load balancer -func handleNat44StaticMappingLb(ctx *StaticMappingLbContext, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) handleNat44StaticMappingLb(ctx *StaticMappingLbContext, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(nat.Nat44AddDelLbStaticMapping{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(nat.Nat44AddDelLbStaticMapping{}).LogTimeEntry(time.Since(t)) }(time.Now()) // Transform local IP/Ports @@ -240,7 +237,7 @@ func handleNat44StaticMappingLb(ctx *StaticMappingLbContext, isAdd bool, vppChan } reply := &nat.Nat44AddDelLbStaticMappingReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -251,9 +248,9 @@ func handleNat44StaticMappingLb(ctx *StaticMappingLbContext, isAdd bool, vppChan } // Calls VPP binary API to add/remove identity mapping -func handleNat44IdentityMapping(ctx *IdentityMappingContext, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) handleNat44IdentityMapping(ctx *IdentityMappingContext, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(nat.Nat44AddDelIdentityMapping{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(nat.Nat44AddDelIdentityMapping{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &nat.Nat44AddDelIdentityMapping{ @@ -279,7 +276,7 @@ func handleNat44IdentityMapping(ctx *IdentityMappingContext, isAdd bool, vppChan } reply := &nat.Nat44AddDelIdentityMappingReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -289,69 +286,56 @@ func handleNat44IdentityMapping(ctx *IdentityMappingContext, isAdd bool, vppChan return nil } -// EnableNat44Interface enables NAT feature for provided interface -func EnableNat44Interface(ifIdx uint32, isInside bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44Interface(ifIdx, isInside, true, vppChan, stopwatch) +func (handler *natVppHandler) EnableNat44Interface(ifIdx uint32, 
isInside bool) error { + return handler.handleNat44Interface(ifIdx, isInside, true) } -// DisableNat44Interface enables NAT feature for provided interface -func DisableNat44Interface(ifIdx uint32, isInside bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44Interface(ifIdx, isInside, false, vppChan, stopwatch) +func (handler *natVppHandler) DisableNat44Interface(ifIdx uint32, isInside bool) error { + return handler.handleNat44Interface(ifIdx, isInside, false) } -// EnableNat44InterfaceOutput enables NAT output feature for provided interface -func EnableNat44InterfaceOutput(ifIdx uint32, isInside bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44InterfaceOutputFeature(ifIdx, isInside, true, vppChan, stopwatch) +func (handler *natVppHandler) EnableNat44InterfaceOutput(ifIdx uint32, isInside bool) error { + return handler.handleNat44InterfaceOutputFeature(ifIdx, isInside, true) } -// DisableNat44InterfaceOutput disables NAT output feature for provided interface -func DisableNat44InterfaceOutput(ifIdx uint32, isInside bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44InterfaceOutputFeature(ifIdx, isInside, false, vppChan, stopwatch) +func (handler *natVppHandler) DisableNat44InterfaceOutput(ifIdx uint32, isInside bool) error { + return handler.handleNat44InterfaceOutputFeature(ifIdx, isInside, false) } -// AddNat44AddressPool sets new NAT address pool -func AddNat44AddressPool(first, last []byte, vrf uint32, twiceNat bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44AddressPool(first, last, vrf, twiceNat, true, vppChan, stopwatch) +func (handler *natVppHandler) AddNat44AddressPool(first, last []byte, vrf uint32, twiceNat bool) error { + return handler.handleNat44AddressPool(first, last, vrf, twiceNat, true) } -// DelNat44AddressPool removes existing NAT address pool -func DelNat44AddressPool(first, last []byte, vrf uint32, twiceNat bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44AddressPool(first, last, vrf, twiceNat, false, vppChan, stopwatch) +func (handler *natVppHandler) DelNat44AddressPool(first, last []byte, vrf uint32, twiceNat bool) error { + return handler.handleNat44AddressPool(first, last, vrf, twiceNat, false) } -// AddNat44IdentityMapping sets new NAT address pool -func AddNat44IdentityMapping(ctx *IdentityMappingContext, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44IdentityMapping(ctx, true, vppChan, stopwatch) +func (handler *natVppHandler) AddNat44IdentityMapping(ctx *IdentityMappingContext) error { + return handler.handleNat44IdentityMapping(ctx, true) } -// DelNat44IdentityMapping sets new NAT address pool -func DelNat44IdentityMapping(ctx *IdentityMappingContext, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44IdentityMapping(ctx, false, vppChan, stopwatch) +func (handler *natVppHandler) DelNat44IdentityMapping(ctx *IdentityMappingContext) error { + return handler.handleNat44IdentityMapping(ctx, false) } -// AddNat44StaticMapping creates new static mapping entry -// (considering address only or both, address and port depending on the context) -func AddNat44StaticMapping(ctx *StaticMappingContext, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) AddNat44StaticMapping(ctx *StaticMappingContext) error { if ctx.AddressOnly { - return handleNat44StaticMapping(ctx, 
true, true, vppChan, stopwatch) + return handler.handleNat44StaticMapping(ctx, true, true) } - return handleNat44StaticMapping(ctx, true, false, vppChan, stopwatch) + return handler.handleNat44StaticMapping(ctx, true, false) } -// DelNat44StaticMapping removes existing static mapping entry -func DelNat44StaticMapping(ctx *StaticMappingContext, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *natVppHandler) DelNat44StaticMapping(ctx *StaticMappingContext) error { if ctx.AddressOnly { - return handleNat44StaticMapping(ctx, false, true, vppChan, stopwatch) + return handler.handleNat44StaticMapping(ctx, false, true) } - return handleNat44StaticMapping(ctx, false, false, vppChan, stopwatch) + return handler.handleNat44StaticMapping(ctx, false, false) } -// AddNat44StaticMappingLb creates new static mapping entry with load balancer -func AddNat44StaticMappingLb(ctx *StaticMappingLbContext, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44StaticMappingLb(ctx, true, vppChan, stopwatch) +func (handler *natVppHandler) AddNat44StaticMappingLb(ctx *StaticMappingLbContext) error { + return handler.handleNat44StaticMappingLb(ctx, true) } -// DelNat44StaticMappingLb removes existing static mapping entry with load balancer -func DelNat44StaticMappingLb(ctx *StaticMappingLbContext, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return handleNat44StaticMappingLb(ctx, false, vppChan, stopwatch) +func (handler *natVppHandler) DelNat44StaticMappingLb(ctx *StaticMappingLbContext) error { + return handler.handleNat44StaticMappingLb(ctx, false) } diff --git a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go index 934a0ca4c2..6bbb14c88b 100644 --- a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go @@ -21,16 +21,15 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/nat" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) func TestSetNat44Forwarding(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44ForwardingEnableDisableReply{}) - err := vppcalls.SetNat44Forwarding(true, ctx.MockChannel, nil) + err := natHandler.SetNat44Forwarding(true) Expect(err).ShouldNot(HaveOccurred()) @@ -41,11 +40,11 @@ func TestSetNat44Forwarding(t *testing.T) { } func TestUnsetNat44Forwarding(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44ForwardingEnableDisableReply{}) - err := vppcalls.SetNat44Forwarding(false, ctx.MockChannel, nil) + err := natHandler.SetNat44Forwarding(false) Expect(err).ShouldNot(HaveOccurred()) @@ -56,34 +55,34 @@ func TestUnsetNat44Forwarding(t *testing.T) { } func TestSetNat44ForwardingError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{}) - err := vppcalls.SetNat44Forwarding(true, ctx.MockChannel, nil) + err := natHandler.SetNat44Forwarding(true) Expect(err).Should(HaveOccurred()) } func TestSetNat44ForwardingRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44ForwardingEnableDisableReply{ Retval: 1, }) - err := vppcalls.SetNat44Forwarding(true, ctx.MockChannel, nil) + err := natHandler.SetNat44Forwarding(true) Expect(err).Should(HaveOccurred()) } func TestEnableNat44InterfaceAsInside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) - err := vppcalls.EnableNat44Interface(1, true, ctx.MockChannel, nil) + err := natHandler.EnableNat44Interface(1, true) Expect(err).ShouldNot(HaveOccurred()) @@ -96,11 +95,11 @@ func TestEnableNat44InterfaceAsInside(t *testing.T) { } func TestEnableNat44InterfaceAsOutside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) - err := vppcalls.EnableNat44Interface(2, false, ctx.MockChannel, nil) + err := natHandler.EnableNat44Interface(2, false) Expect(err).ShouldNot(HaveOccurred()) @@ -113,34 +112,34 @@ func TestEnableNat44InterfaceAsOutside(t *testing.T) { } func TestEnableNat44InterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object ctx.MockVpp.MockReply(&nat.Nat44AddDelAddressRangeReply{}) - err := vppcalls.EnableNat44Interface(2, false, ctx.MockChannel, nil) + err := natHandler.EnableNat44Interface(2, false) Expect(err).Should(HaveOccurred()) } func TestEnableNat44InterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{ Retval: 1, }) - err := vppcalls.EnableNat44Interface(2, false, ctx.MockChannel, nil) + err := natHandler.EnableNat44Interface(2, false) Expect(err).Should(HaveOccurred()) } func TestDisableNat44InterfaceAsInside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() 
ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) - err := vppcalls.DisableNat44Interface(1, true, ctx.MockChannel, nil) + err := natHandler.DisableNat44Interface(1, true) Expect(err).ShouldNot(HaveOccurred()) @@ -153,11 +152,11 @@ func TestDisableNat44InterfaceAsInside(t *testing.T) { } func TestDisableNat44InterfaceAsOutside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) - err := vppcalls.DisableNat44Interface(2, false, ctx.MockChannel, nil) + err := natHandler.DisableNat44Interface(2, false) Expect(err).ShouldNot(HaveOccurred()) @@ -170,11 +169,11 @@ func TestDisableNat44InterfaceAsOutside(t *testing.T) { } func TestEnableNat44InterfaceOutputAsInside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) - err := vppcalls.EnableNat44InterfaceOutput(1, true, ctx.MockChannel, nil) + err := natHandler.EnableNat44InterfaceOutput(1, true) Expect(err).ShouldNot(HaveOccurred()) @@ -187,11 +186,11 @@ func TestEnableNat44InterfaceOutputAsInside(t *testing.T) { } func TestEnableNat44InterfaceOutputAsOutside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) - err := vppcalls.EnableNat44InterfaceOutput(2, false, ctx.MockChannel, nil) + err := natHandler.EnableNat44InterfaceOutput(2, false) Expect(err).ShouldNot(HaveOccurred()) @@ -204,34 +203,34 @@ func TestEnableNat44InterfaceOutputAsOutside(t *testing.T) { } func TestEnableNat44InterfaceOutputError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{}) - err := vppcalls.EnableNat44InterfaceOutput(2, false, ctx.MockChannel, nil) + err := natHandler.EnableNat44InterfaceOutput(2, false) Expect(err).Should(HaveOccurred()) } func TestEnableNat44InterfaceOutputRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{ Retval: 1, }) - err := vppcalls.EnableNat44InterfaceOutput(2, false, ctx.MockChannel, nil) + err := natHandler.EnableNat44InterfaceOutput(2, false) Expect(err).Should(HaveOccurred()) } func TestDisableNat44InterfaceOutputAsInside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) - err := vppcalls.DisableNat44InterfaceOutput(1, true, ctx.MockChannel, nil) + err := natHandler.DisableNat44InterfaceOutput(1, true) Expect(err).ShouldNot(HaveOccurred()) @@ -244,11 +243,11 @@ func TestDisableNat44InterfaceOutputAsInside(t *testing.T) { } func TestDisableNat44InterfaceOutputAsOutside(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) - err := vppcalls.DisableNat44InterfaceOutput(2, false, ctx.MockChannel, nil) + err := natHandler.DisableNat44InterfaceOutput(2, false) Expect(err).ShouldNot(HaveOccurred()) @@ -261,14 +260,14 @@ func TestDisableNat44InterfaceOutputAsOutside(t *testing.T) { } func 
TestAddNat44AddressPool(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() lastIP := net.ParseIP("10.0.0.2").To4() ctx.MockVpp.MockReply(&nat.Nat44AddDelAddressRangeReply{}) - err := vppcalls.AddNat44AddressPool(firstIP, lastIP, 0, false, ctx.MockChannel, nil) + err := natHandler.AddNat44AddressPool(firstIP, lastIP, 0, false) Expect(err).ShouldNot(HaveOccurred()) @@ -282,7 +281,7 @@ func TestAddNat44AddressPool(t *testing.T) { } func TestAddNat44AddressPoolError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() @@ -290,13 +289,13 @@ func TestAddNat44AddressPoolError(t *testing.T) { // Incorrect reply object ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{}) - err := vppcalls.AddNat44AddressPool(firstIP, lastIP, 0, false, ctx.MockChannel, nil) + err := natHandler.AddNat44AddressPool(firstIP, lastIP, 0, false) Expect(err).Should(HaveOccurred()) } func TestAddNat44AddressPoolRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() @@ -305,20 +304,20 @@ func TestAddNat44AddressPoolRetval(t *testing.T) { ctx.MockVpp.MockReply(&nat.Nat44AddDelAddressRangeReply{ Retval: 1, }) - err := vppcalls.AddNat44AddressPool(firstIP, lastIP, 0, false, ctx.MockChannel, nil) + err := natHandler.AddNat44AddressPool(firstIP, lastIP, 0, false) Expect(err).Should(HaveOccurred()) } func TestDelNat44AddressPool(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() lastIP := net.ParseIP("10.0.0.2").To4() ctx.MockVpp.MockReply(&nat.Nat44AddDelAddressRangeReply{}) - err := vppcalls.DelNat44AddressPool(firstIP, lastIP, 0, false, ctx.MockChannel, nil) + err := natHandler.DelNat44AddressPool(firstIP, lastIP, 0, false) Expect(err).ShouldNot(HaveOccurred()) @@ -332,7 +331,7 @@ func TestDelNat44AddressPool(t *testing.T) { } func TestAddNat44StaticMapping(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -353,7 +352,7 @@ func TestAddNat44StaticMapping(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{}) - err := vppcalls.AddNat44StaticMapping(stmCtx, ctx.MockChannel, nil) + err := natHandler.AddNat44StaticMapping(stmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -374,7 +373,7 @@ func TestAddNat44StaticMapping(t *testing.T) { } func TestAddNat44StaticMappingAddrOnly(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -389,7 +388,7 @@ func TestAddNat44StaticMappingAddrOnly(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{}) - err := vppcalls.AddNat44StaticMapping(stmCtx, ctx.MockChannel, nil) + err := natHandler.AddNat44StaticMapping(stmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -403,30 +402,30 @@ func TestAddNat44StaticMappingAddrOnly(t *testing.T) { } func TestAddNat44StaticMappingError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object ctx.MockVpp.MockReply(&nat.Nat44AddDelLbStaticMappingReply{}) - err := 
vppcalls.AddNat44StaticMapping(&vppcalls.StaticMappingContext{}, ctx.MockChannel, nil) + err := natHandler.AddNat44StaticMapping(&vppcalls.StaticMappingContext{}) Expect(err).Should(HaveOccurred()) } func TestAddNat44StaticMappingRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{ Retval: 1, }) - err := vppcalls.AddNat44StaticMapping(&vppcalls.StaticMappingContext{}, ctx.MockChannel, nil) + err := natHandler.AddNat44StaticMapping(&vppcalls.StaticMappingContext{}) Expect(err).Should(HaveOccurred()) } func TestDelNat44StaticMapping(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -441,7 +440,7 @@ func TestDelNat44StaticMapping(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{}) - err := vppcalls.DelNat44StaticMapping(stmCtx, ctx.MockChannel, nil) + err := natHandler.DelNat44StaticMapping(stmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -455,7 +454,7 @@ func TestDelNat44StaticMapping(t *testing.T) { } func TestDelNat44StaticMappingAddrOnly(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -470,7 +469,7 @@ func TestDelNat44StaticMappingAddrOnly(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{}) - err := vppcalls.DelNat44StaticMapping(stmCtx, ctx.MockChannel, nil) + err := natHandler.DelNat44StaticMapping(stmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -484,7 +483,7 @@ func TestDelNat44StaticMappingAddrOnly(t *testing.T) { } func TestAddNat44StaticMappingLb(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() externalIP := net.ParseIP("10.0.0.1").To4() @@ -503,7 +502,7 @@ func TestAddNat44StaticMappingLb(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelLbStaticMappingReply{}) - err := vppcalls.AddNat44StaticMappingLb(stmCtx, ctx.MockChannel, nil) + err := natHandler.AddNat44StaticMappingLb(stmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -533,30 +532,30 @@ func TestAddNat44StaticMappingLb(t *testing.T) { } func TestAddNat44StaticMappingLbError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{}) - err := vppcalls.AddNat44StaticMappingLb(&vppcalls.StaticMappingLbContext{}, ctx.MockChannel, nil) + err := natHandler.AddNat44StaticMappingLb(&vppcalls.StaticMappingLbContext{}) Expect(err).Should(HaveOccurred()) } func TestAddNat44StaticMappingLbRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44AddDelLbStaticMappingReply{ Retval: 1, }) - err := vppcalls.AddNat44StaticMappingLb(&vppcalls.StaticMappingLbContext{}, ctx.MockChannel, nil) + err := natHandler.AddNat44StaticMappingLb(&vppcalls.StaticMappingLbContext{}) Expect(err).Should(HaveOccurred()) } func TestDelNat44StaticMappingLb(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() externalIP := net.ParseIP("10.0.0.1").To4() @@ -575,7 +574,7 @@ func TestDelNat44StaticMappingLb(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelLbStaticMappingReply{}) - 
err := vppcalls.DelNat44StaticMappingLb(stmCtx, ctx.MockChannel, nil) + err := natHandler.DelNat44StaticMappingLb(stmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -605,7 +604,7 @@ func TestDelNat44StaticMappingLb(t *testing.T) { } func TestAddNat44IdentityMapping(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() address := net.ParseIP("10.0.0.1").To4() @@ -621,7 +620,7 @@ func TestAddNat44IdentityMapping(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{}) - err := vppcalls.AddNat44IdentityMapping(idmCtx, ctx.MockChannel, nil) + err := natHandler.AddNat44IdentityMapping(idmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -638,7 +637,7 @@ func TestAddNat44IdentityMapping(t *testing.T) { } func TestAddNat44IdentityMappingAddrOnly(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() // DataContext (IPAddress == nil and Port == 0 means it's address only) @@ -650,7 +649,7 @@ func TestAddNat44IdentityMappingAddrOnly(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{}) - err := vppcalls.AddNat44IdentityMapping(idmCtx, ctx.MockChannel, nil) + err := natHandler.AddNat44IdentityMapping(idmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -662,7 +661,7 @@ func TestAddNat44IdentityMappingAddrOnly(t *testing.T) { } func TestAddNat44IdentityMappingNoInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() address := net.ParseIP("10.0.0.1").To4() @@ -677,7 +676,7 @@ func TestAddNat44IdentityMappingNoInterface(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{}) - err := vppcalls.AddNat44IdentityMapping(idmCtx, ctx.MockChannel, nil) + err := natHandler.AddNat44IdentityMapping(idmCtx) Expect(err).ShouldNot(HaveOccurred()) @@ -691,30 +690,30 @@ func TestAddNat44IdentityMappingNoInterface(t *testing.T) { } func TestAddNat44IdentityMappingError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{}) - err := vppcalls.AddNat44IdentityMapping(&vppcalls.IdentityMappingContext{}, ctx.MockChannel, nil) + err := natHandler.AddNat44IdentityMapping(&vppcalls.IdentityMappingContext{}) Expect(err).Should(HaveOccurred()) } func TestAddNat44IdentityMappingRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{ Retval: 1, }) - err := vppcalls.AddNat44IdentityMapping(&vppcalls.IdentityMappingContext{}, ctx.MockChannel, nil) + err := natHandler.AddNat44IdentityMapping(&vppcalls.IdentityMappingContext{}) Expect(err).Should(HaveOccurred()) } func TestDelNat44IdentityMapping(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, natHandler := natTestSetup(t) defer ctx.TeardownTestCtx() address := net.ParseIP("10.0.0.1").To4() @@ -729,7 +728,7 @@ func TestDelNat44IdentityMapping(t *testing.T) { } ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{}) - err := vppcalls.DelNat44IdentityMapping(idmCtx, ctx.MockChannel, nil) + err := natHandler.DelNat44IdentityMapping(idmCtx) Expect(err).ShouldNot(HaveOccurred()) diff --git a/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls.go index 3bc8fd8218..83c52f3027 100644 --- 
a/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls.go @@ -18,16 +18,13 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) -// SetRxMode calls SwInterfaceSetRxMode bin -func SetRxMode(ifIdx uint32, rxModeSettings *intf.Interfaces_Interface_RxModeSettings, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) SetRxMode(ifIdx uint32, rxModeSettings *intf.Interfaces_Interface_RxModeSettings) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.SwInterfaceSetRxMode{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.SwInterfaceSetRxMode{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &interfaces.SwInterfaceSetRxMode{ @@ -38,7 +35,7 @@ func SetRxMode(ifIdx uint32, rxModeSettings *intf.Interfaces_Interface_RxModeSet } reply := &interfaces.SwInterfaceSetRxModeReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls_test.go index c479f65067..d616fc04c2 100644 --- a/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/rx_mode_vppcalls_test.go @@ -18,23 +18,21 @@ import ( "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ifModel "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) func TestSetRxMode(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetRxModeReply{}) - err := vppcalls.SetRxMode(1, &ifModel.Interfaces_Interface_RxModeSettings{ + err := ifHandler.SetRxMode(1, &ifModel.Interfaces_Interface_RxModeSettings{ RxMode: ifModel.RxModeType_DEFAULT, QueueId: 1, QueueIdValid: 2, - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceSetRxMode) @@ -46,33 +44,33 @@ func TestSetRxMode(t *testing.T) { } func TestSetRxModeError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetRxMode{}) - err := vppcalls.SetRxMode(1, &ifModel.Interfaces_Interface_RxModeSettings{ + err := ifHandler.SetRxMode(1, &ifModel.Interfaces_Interface_RxModeSettings{ RxMode: ifModel.RxModeType_DEFAULT, QueueId: 1, QueueIdValid: 2, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestSetRxModeRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetRxModeReply{ Retval: 1, }) - err := vppcalls.SetRxMode(1, &ifModel.Interfaces_Interface_RxModeSettings{ + err := ifHandler.SetRxMode(1, &ifModel.Interfaces_Interface_RxModeSettings{ RxMode: ifModel.RxModeType_DEFAULT, QueueId: 1, QueueIdValid: 2, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls.go index d7397a06d7..616f68d751 100644 --- a/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls.go @@ -20,19 +20,14 @@ import ( "strconv" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) -// SetRxPlacement -func SetRxPlacement(vppInternalName string, rxPlacement *intf.Interfaces_Interface_RxPlacementSettings, - vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) SetRxPlacement(vppInternalName string, rxPlacement *intf.Interfaces_Interface_RxPlacementSettings) error { defer func(t time.Time) { - stopwatch.TimeLog(interfaces.SwInterfaceSetRxMode{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(interfaces.SwInterfaceSetRxMode{}).LogTimeEntry(time.Since(t)) }(time.Now()) queue := strconv.Itoa(int(rxPlacement.Queue)) @@ -40,7 +35,7 @@ func SetRxPlacement(vppInternalName string, rxPlacement *intf.Interfaces_Interfa command := "set interface rx-placement " + vppInternalName + " queue " + queue + " worker " + worker - logrus.DefaultLogger().Warnf("Setting rx-placement commnad %s", command) + handler.log.Warnf("Setting rx-placement command %s", command) // todo: binary api call for rx-placement is not available req := &vpe.CliInband{ @@ -49,7 +44,7 @@ func SetRxPlacement(vppInternalName string, rxPlacement *intf.Interfaces_Interfa } reply := &vpe.CliInbandReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if 
reply.Retval != 0 { diff --git a/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls_test.go index e05d0dda5c..9b53cc5ce2 100644 --- a/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/rx_placement_vppcalls_test.go @@ -18,22 +18,20 @@ import ( "testing" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestSetRxPlacement(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vpe.CliInbandReply{}) - err := vppcalls.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ + err := ifHandler.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ Queue: 1, Worker: 2, - }, ctx.MockChannel, nil) + }) expMsg := "set interface rx-placement if-internal queue 1 worker 2" expMsgLen := len(expMsg) @@ -46,47 +44,47 @@ func TestSetRxPlacement(t *testing.T) { } func TestSetRxPlacementRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vpe.CliInbandReply{ Retval: 1, }) - err := vppcalls.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ + err := ifHandler.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ Queue: 1, Worker: 2, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestSetRxPlacementReply(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vpe.CliInbandReply{ Reply: []byte("dummy-reply"), }) - err := vppcalls.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ + err := ifHandler.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ Queue: 1, Worker: 2, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestSetRxPlacementError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vpe.CliInband{}) - err := vppcalls.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ + err := ifHandler.SetRxPlacement("if-internal", &interfaces.Interfaces_Interface_RxPlacementSettings{ Queue: 1, Worker: 2, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls.go index d25026d598..beb922ffab 100644 --- a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls.go @@ -19,8 +19,6 @@ import ( "net" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" ) @@ -31,9 +29,9 @@ type StnRule struct { IfaceIdx uint32 } -func addDelStnRule(ifIdx uint32, addr *net.IP, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *stnVppHandler) addDelStnRule(ifIdx uint32, addr *net.IP, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(stn.StnAddDelRule{}).LogTimeEntry(time.Since(t)) + 
handler.stopwatch.TimeLog(stn.StnAddDelRule{}).LogTimeEntry(time.Since(t)) }(time.Now()) // prepare the message @@ -55,7 +53,7 @@ func addDelStnRule(ifIdx uint32, addr *net.IP, isAdd bool, vppChan govppapi.Chan } reply := &stn.StnAddDelRuleReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -66,13 +64,11 @@ func addDelStnRule(ifIdx uint32, addr *net.IP, isAdd bool, vppChan govppapi.Chan } -// AddStnRule calls StnAddDelRule bin API with IsAdd=1 -func AddStnRule(ifIdx uint32, addr *net.IP, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return addDelStnRule(ifIdx, addr, true, vppChan, stopwatch) +func (handler *stnVppHandler) AddStnRule(ifIdx uint32, addr *net.IP) error { + return handler.addDelStnRule(ifIdx, addr, true) } -// DelStnRule calls StnAddDelRule bin API with IsAdd=0 -func DelStnRule(ifIdx uint32, addr *net.IP, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return addDelStnRule(ifIdx, addr, false, vppChan, stopwatch) +func (handler *stnVppHandler) DelStnRule(ifIdx uint32, addr *net.IP) error { + return handler.addDelStnRule(ifIdx, addr, false) } diff --git a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go index 1f2c5b957b..599ec85a58 100644 --- a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go @@ -18,6 +18,8 @@ import ( "net" "testing" + "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" @@ -25,13 +27,13 @@ import ( ) func TestAddStnRule(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, stnHandler := stnTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&stn.StnAddDelRuleReply{}) _, ip, _ := net.ParseCIDR("10.0.0.1/24") - err := vppcalls.AddStnRule(1, &ip.IP, ctx.MockChannel, nil) + err := stnHandler.AddStnRule(1, &ip.IP) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*stn.StnAddDelRule) @@ -43,13 +45,13 @@ func TestAddStnRule(t *testing.T) { } func TestAddStnRuleIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, stnHandler := stnTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&stn.StnAddDelRuleReply{}) _, ip, _ := net.ParseCIDR("2001:db8:0:1:1:1:1:1/128") - err := vppcalls.AddStnRule(1, &ip.IP, ctx.MockChannel, nil) + err := stnHandler.AddStnRule(1, &ip.IP) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*stn.StnAddDelRule) @@ -61,31 +63,31 @@ func TestAddStnRuleIPv6(t *testing.T) { } func TestAddStnRuleInvalidIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, stnHandler := stnTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&stn.StnAddDelRuleReply{}) var ip net.IP = []byte("invalid-ip") - err := vppcalls.AddStnRule(1, &ip, ctx.MockChannel, nil) + err := stnHandler.AddStnRule(1, &ip) Expect(err).ToNot(BeNil()) } func TestAddStnRuleError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, stnHandler := stnTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&stn.StnAddDelRule{}) _, ip, _ := net.ParseCIDR("10.0.0.1/24") - err := vppcalls.AddStnRule(1, &ip.IP, ctx.MockChannel, nil) + err := stnHandler.AddStnRule(1, &ip.IP) Expect(err).ToNot(BeNil()) } func 
TestAddStnRuleRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, stnHandler := stnTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&stn.StnAddDelRuleReply{ @@ -93,22 +95,29 @@ func TestAddStnRuleRetval(t *testing.T) { }) _, ip, _ := net.ParseCIDR("10.0.0.1/24") - err := vppcalls.AddStnRule(1, &ip.IP, ctx.MockChannel, nil) + err := stnHandler.AddStnRule(1, &ip.IP) Expect(err).ToNot(BeNil()) } func TestDelStnRule(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, stnHandler := stnTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&stn.StnAddDelRuleReply{}) _, ip, _ := net.ParseCIDR("10.0.0.1/24") - err := vppcalls.DelStnRule(1, &ip.IP, ctx.MockChannel, nil) + err := stnHandler.DelStnRule(1, &ip.IP) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*stn.StnAddDelRule) Expect(ok).To(BeTrue()) Expect(vppMsg.IsAdd).To(BeEquivalentTo(0)) } + +func stnTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.StnVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + stnHandler := vppcalls.NewStnVppHandler(ctx.MockChannel, measure.NewStopwatch("test-stopwatch", log)) + return ctx, stnHandler +} diff --git a/plugins/vpp/ifplugin/vppcalls/tap_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/tap_vppcalls.go index d5a2fc6309..575bec225e 100644 --- a/plugins/vpp/ifplugin/vppcalls/tap_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/tap_vppcalls.go @@ -19,17 +19,14 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/tap" "github.com/ligato/vpp-agent/plugins/vpp/binapi/tapv2" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) -// AddTapInterface calls TapConnect bin API. -func AddTapInterface(ifName string, tapIf *interfaces.Interfaces_Interface_Tap, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (swIfIdx uint32, err error) { +func (handler *ifVppHandler) AddTapInterface(ifName string, tapIf *interfaces.Interfaces_Interface_Tap) (swIfIdx uint32, err error) { defer func(t time.Time) { - stopwatch.TimeLog(tap.TapConnect{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(tap.TapConnect{}).LogTimeEntry(time.Since(t)) }(time.Now()) if tapIf == nil || tapIf.HostIfName == "" { @@ -56,7 +53,7 @@ func AddTapInterface(ifName string, tapIf *interfaces.Interfaces_Interface_Tap, } reply := &tapv2.TapCreateV2Reply{} - err = vppChan.SendRequest(req).ReceiveReply(reply) + err = handler.callsChannel.SendRequest(req).ReceiveReply(reply) retval = reply.Retval swIfIdx = reply.SwIfIndex msgName = reply.GetMessageName() @@ -68,7 +65,7 @@ func AddTapInterface(ifName string, tapIf *interfaces.Interfaces_Interface_Tap, } reply := &tap.TapConnectReply{} - err = vppChan.SendRequest(req).ReceiveReply(reply) + err = handler.callsChannel.SendRequest(req).ReceiveReply(reply) retval = reply.Retval swIfIdx = reply.SwIfIndex msgName = reply.GetMessageName() @@ -80,13 +77,12 @@ func AddTapInterface(ifName string, tapIf *interfaces.Interfaces_Interface_Tap, return 0, fmt.Errorf("%s returned %d", msgName, retval) } - return swIfIdx, SetInterfaceTag(ifName, swIfIdx, vppChan, stopwatch) + return swIfIdx, handler.SetInterfaceTag(ifName, swIfIdx) } -// DeleteTapInterface calls TapDelete bin API. 
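// Sketch: a short usage example for the tap vppcalls in this file.
// AddTapInterface dispatches on tapIf.Version (2 sends tapv2.TapCreateV2,
// anything else falls back to the legacy tap.TapConnect) and tags the new
// interface afterwards; DeleteTapInterface must be given the same version so
// it sends the matching delete message. Package and function names here are
// illustrative; the anonymous interface restates only the method signatures
// visible in this diff.
package example

import (
	ifModel "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces"
)

func createAndRemoveTap(ifHandler interface {
	AddTapInterface(ifName string, tapIf *ifModel.Interfaces_Interface_Tap) (uint32, error)
	DeleteTapInterface(ifName string, idx uint32, version uint32) error
}) error {
	tapIf := &ifModel.Interfaces_Interface_Tap{
		Version:    2,           // selects the tapv2.TapCreateV2 code path
		HostIfName: "tap1-host", // required: AddTapInterface rejects an empty host name
	}
	swIfIdx, err := ifHandler.AddTapInterface("tap1", tapIf)
	if err != nil {
		return err
	}
	// The version is passed again because the delete side chooses between
	// tap.TapDelete and tapv2.TapDeleteV2 from it.
	return ifHandler.DeleteTapInterface("tap1", swIfIdx, tapIf.Version)
}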
-func DeleteTapInterface(ifName string, idx uint32, version uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ifVppHandler) DeleteTapInterface(ifName string, idx uint32, version uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(tap.TapDelete{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(tap.TapDelete{}).LogTimeEntry(time.Since(t)) }(time.Now()) var ( @@ -100,7 +96,7 @@ func DeleteTapInterface(ifName string, idx uint32, version uint32, vppChan govpp } reply := &tapv2.TapDeleteV2Reply{} - err = vppChan.SendRequest(req).ReceiveReply(reply) + err = handler.callsChannel.SendRequest(req).ReceiveReply(reply) retval = reply.Retval msgName = reply.GetMessageName() } else { @@ -109,7 +105,7 @@ func DeleteTapInterface(ifName string, idx uint32, version uint32, vppChan govpp } reply := &tap.TapDeleteReply{} - err = vppChan.SendRequest(req).ReceiveReply(reply) + err = handler.callsChannel.SendRequest(req).ReceiveReply(reply) retval = reply.Retval msgName = reply.GetMessageName() } @@ -120,5 +116,5 @@ func DeleteTapInterface(ifName string, idx uint32, version uint32, vppChan govpp return fmt.Errorf("%s returned %d", msgName, retval) } - return RemoveInterfaceTag(ifName, idx, vppChan, stopwatch) + return handler.RemoveInterfaceTag(ifName, idx) } diff --git a/plugins/vpp/ifplugin/vppcalls/tap_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/tap_vppcalls_test.go index 6c2300e4cb..4bb66f0afd 100644 --- a/plugins/vpp/ifplugin/vppcalls/tap_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/tap_vppcalls_test.go @@ -20,14 +20,12 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/binapi/tap" "github.com/ligato/vpp-agent/plugins/vpp/binapi/tapv2" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ifModel "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) func TestAddTapInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tap.TapConnectReply{ @@ -35,13 +33,13 @@ func TestAddTapInterface(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := vppcalls.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ + swIfIdx, err := ifHandler.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ Version: 1, HostIfName: "hostIf", Namespace: "ns1", RxRingSize: 1, TxRingSize: 1, - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) var msgCheck bool @@ -57,7 +55,7 @@ func TestAddTapInterface(t *testing.T) { } func TestAddTapInterfaceV2(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tapv2.TapCreateV2Reply{ @@ -65,13 +63,13 @@ func TestAddTapInterfaceV2(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := vppcalls.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ + swIfIdx, err := ifHandler.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ Version: 2, HostIfName: "hostIf", Namespace: "ns1", RxRingSize: 1, TxRingSize: 1, - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) var msgCheck bool @@ -88,7 +86,7 @@ func TestAddTapInterfaceV2(t *testing.T) { } func TestAddTapInterfaceNoInput(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tap.TapConnectReply{ @@ -96,29 +94,29 @@ func TestAddTapInterfaceNoInput(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := vppcalls.AddTapInterface("tapIf", nil, ctx.MockChannel, nil) + _, err := ifHandler.AddTapInterface("tapIf", nil) Expect(err).ToNot(BeNil()) } func TestAddTapInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tap.TapConnect{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := vppcalls.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ + _, err := ifHandler.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ Version: 1, HostIfName: "hostIf", Namespace: "ns1", RxRingSize: 1, TxRingSize: 1, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestAddTapInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tap.TapConnectReply{ @@ -126,24 +124,24 @@ func TestAddTapInterfaceRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := vppcalls.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ + _, err := ifHandler.AddTapInterface("tapIf", &ifModel.Interfaces_Interface_Tap{ Version: 1, HostIfName: "hostIf", Namespace: "ns1", RxRingSize: 1, TxRingSize: 1, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDeleteTapInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tap.TapDeleteReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteTapInterface("tapIf", 1, 1, ctx.MockChannel, nil) + err := 
ifHandler.DeleteTapInterface("tapIf", 1, 1) Expect(err).To(BeNil()) var msgCheck bool for _, msg := range ctx.MockChannel.Msgs { @@ -157,13 +155,13 @@ func TestDeleteTapInterface(t *testing.T) { } func TestDeleteTapInterfaceV2(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tapv2.TapDeleteV2Reply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteTapInterface("tapIf", 1, 2, ctx.MockChannel, nil) + err := ifHandler.DeleteTapInterface("tapIf", 1, 2) Expect(err).To(BeNil()) var msgCheck bool for _, msg := range ctx.MockChannel.Msgs { @@ -177,18 +175,18 @@ func TestDeleteTapInterfaceV2(t *testing.T) { } func TestDeleteTapInterfaceError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tap.TapDelete{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteTapInterface("tapIf", 1, 1, ctx.MockChannel, nil) + err := ifHandler.DeleteTapInterface("tapIf", 1, 1) Expect(err).ToNot(BeNil()) } func TestDeleteTapInterfaceRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&tap.TapDeleteReply{ @@ -196,6 +194,6 @@ func TestDeleteTapInterfaceRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := vppcalls.DeleteTapInterface("tapIf", 1, 1, ctx.MockChannel, nil) + err := ifHandler.DeleteTapInterface("tapIf", 1, 1) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go index c1215a0ccb..4b090ff3d4 100644 --- a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go @@ -24,10 +24,8 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" ) -// GetInterfaceVRF assigns VRF table to interface -func GetInterfaceVRF(ifIdx uint32, log logging.Logger, vppChan govppapi.Channel) (vrfID uint32, err error) { - log.Debugf("Getting VRF for interface %v", ifIdx) - +func (handler *ifVppHandler) GetInterfaceVRF(ifIdx uint32) (vrfID uint32, err error) { + handler.log.Debugf("Getting VRF for interface %v", ifIdx) req := &interfaces.SwInterfaceGetTable{ SwIfIndex: ifIdx, } @@ -39,7 +37,7 @@ func GetInterfaceVRF(ifIdx uint32, log logging.Logger, vppChan govppapi.Channel) // Send message reply := &interfaces.SwInterfaceGetTableReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, err } if reply.Retval != 0 { @@ -49,15 +47,12 @@ func GetInterfaceVRF(ifIdx uint32, log logging.Logger, vppChan govppapi.Channel) return reply.VrfID, nil } -// SetInterfaceVRF retrieves VRF table from interface -func SetInterfaceVRF(ifaceIndex, vrfID uint32, log logging.Logger, vppChan govppapi.Channel) error { - if err := CreateVrfIfNeeded(vrfID, vppChan); err != nil { - log.Warnf("creating VRF failed: %v", err) - return err +func (handler *ifVppHandler) SetInterfaceVRF(ifaceIndex, vrfID uint32) error { + if err := handler.CreateVrfIfNeeded(vrfID); err != nil { + return fmt.Errorf("creating VRF failed: %v", err) } - log.Debugf("Setting interface %v to VRF %v", ifaceIndex, vrfID) - + handler.log.Debugf("Setting interface %v to VRF %v", ifaceIndex, vrfID) req := &interfaces.SwInterfaceSetTable{ VrfID: vrfID, SwIfIndex: ifaceIndex, @@ 
-70,7 +65,7 @@ func SetInterfaceVRF(ifaceIndex, vrfID uint32, log logging.Logger, vppChan govpp
 // Send message
 reply := new(interfaces.SwInterfaceSetTableReply)
- if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil {
+ if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil {
 return err
 }
 if reply.Retval != 0 {
@@ -82,29 +77,28 @@ func SetInterfaceVRF(ifaceIndex, vrfID uint32, log logging.Logger, vppChan govpp
 // TODO: manage VRF tables globally in separate configurator
-// CreateVrfIfNeeded checks if VRF exists and creates it if not
-func CreateVrfIfNeeded(vrfID uint32, vppChan govppapi.Channel) error {
+func (handler *ifVppHandler) CreateVrfIfNeeded(vrfID uint32) error {
 if vrfID == 0 {
 return nil
 }
- tables, err := dumpVrfTables(vppChan)
+ tables, err := handler.dumpVrfTables()
 if err != nil {
 logrus.DefaultLogger().Warnf("dumping VRF tables failed: %v", err)
 return err
 }
 if _, ok := tables[vrfID]; !ok {
 logrus.DefaultLogger().Infof("VRF table %v does not exist, creating it", vrfID)
- return vppAddIPTable(vrfID, vppChan)
+ return handler.vppAddIPTable(vrfID)
 }
 return nil
 }
-func dumpVrfTables(vppChan govppapi.Channel) (map[uint32][]*ip.IPFibDetails, error) {
+func (handler *ifVppHandler) dumpVrfTables() (map[uint32][]*ip.IPFibDetails, error) {
 fibs := map[uint32][]*ip.IPFibDetails{}
- reqCtx := vppChan.SendMultiRequest(&ip.IPFibDump{})
+ reqCtx := handler.callsChannel.SendMultiRequest(&ip.IPFibDump{})
 for {
 fibDetails := &ip.IPFibDetails{}
 stop, err := reqCtx.ReceiveReply(fibDetails)
@@ -122,7 +116,7 @@ func dumpVrfTables(vppChan govppapi.Channel) (map[uint32][]*ip.IPFibDetails, err
 return fibs, nil
 }
-func vppAddIPTable(tableID uint32, vppChan govppapi.Channel) error {
+func (handler *ifVppHandler) vppAddIPTable(tableID uint32) error {
 req := &ip.IPTableAddDel{
 TableID: tableID,
 IsAdd: 1,
@@ -130,7 +124,7 @@ func vppAddIPTable(tableID uint32, vppChan govppapi.Channel) error {
 // Send message
 reply := &ip.IPTableAddDelReply{}
- if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil {
+ if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil {
 return err
 }
 if reply.Retval != 0 {
diff --git a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls_test.go
index 88f6dd22ef..62fee7e699 100644
--- a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls_test.go
+++ b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls_test.go
@@ -17,52 +17,49 @@ package vppcalls_test
 import (
 "testing"
- "github.com/ligato/cn-infra/logging/logrus"
 "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces"
 "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip"
 "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe"
- "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls"
- "github.com/ligato/vpp-agent/tests/vppcallmock"
 .
"github.com/onsi/gomega" ) func TestGetInterfaceVRF(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceGetTableReply{ VrfID: 1, }) - vrfID, err := vppcalls.GetInterfaceVRF(1, logrus.DefaultLogger(), ctx.MockChannel) + vrfID, err := ifHandler.GetInterfaceVRF(1) Expect(err).To(BeNil()) Expect(vrfID).To(BeEquivalentTo(1)) } func TestGetInterfaceVRFError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceGetTable{}) - _, err := vppcalls.GetInterfaceVRF(1, logrus.DefaultLogger(), ctx.MockChannel) + _, err := ifHandler.GetInterfaceVRF(1) Expect(err).ToNot(BeNil()) } func TestGetInterfaceVRFRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&interfaces.SwInterfaceGetTableReply{ Retval: 1, }) - _, err := vppcalls.GetInterfaceVRF(1, logrus.DefaultLogger(), ctx.MockChannel) + _, err := ifHandler.GetInterfaceVRF(1) Expect(err).ToNot(BeNil()) } func TestSetInterfaceVRF(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPFibDetails{}) @@ -70,7 +67,7 @@ func TestSetInterfaceVRF(t *testing.T) { ctx.MockVpp.MockReply(&ip.IPTableAddDelReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetTableReply{}) - err := vppcalls.SetInterfaceVRF(1, 2, logrus.DefaultLogger(), ctx.MockChannel) + err := ifHandler.SetInterfaceVRF(1, 2) Expect(err).To(BeNil()) vppMsg, ok := ctx.MockChannel.Msg.(*interfaces.SwInterfaceSetTable) Expect(ok).To(BeTrue()) @@ -79,7 +76,7 @@ func TestSetInterfaceVRF(t *testing.T) { } func TestSetInterfaceVRFError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPFibDetails{}) @@ -87,12 +84,12 @@ func TestSetInterfaceVRFError(t *testing.T) { ctx.MockVpp.MockReply(&ip.IPTableAddDelReply{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceSetTable{}) - err := vppcalls.SetInterfaceVRF(1, 2, logrus.DefaultLogger(), ctx.MockChannel) + err := ifHandler.SetInterfaceVRF(1, 2) Expect(err).To(HaveOccurred()) } func TestSetInterfaceVRFRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPFibDetails{}) @@ -102,12 +99,12 @@ func TestSetInterfaceVRFRetval(t *testing.T) { Retval: 1, }) - err := vppcalls.SetInterfaceVRF(1, 2, logrus.DefaultLogger(), ctx.MockChannel) + err := ifHandler.SetInterfaceVRF(1, 2) Expect(err).ToNot(BeNil()) } func TestCreateVrfIfNeeded(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() // IP FIB dump @@ -116,7 +113,7 @@ func TestCreateVrfIfNeeded(t *testing.T) { // Add/del table ctx.MockVpp.MockReply(&ip.IPTableAddDelReply{}) - err := vppcalls.CreateVrfIfNeeded(1, ctx.MockChannel) + err := ifHandler.CreateVrfIfNeeded(1) Expect(err).To(BeNil()) var msgCheck bool for _, msg := range ctx.MockChannel.Msgs { @@ -132,7 +129,7 @@ func TestCreateVrfIfNeeded(t *testing.T) { } func TestCreateVrfIfNeededNull(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() // IP FIB dump @@ -141,12 +138,12 @@ func TestCreateVrfIfNeededNull(t *testing.T) { // Add/del table 
ctx.MockVpp.MockReply(&ip.IPTableAddDelReply{}) - err := vppcalls.CreateVrfIfNeeded(0, ctx.MockChannel) + err := ifHandler.CreateVrfIfNeeded(0) Expect(err).To(BeNil()) } func TestCreateVrfIfNeededError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() // IP FIB dump @@ -155,12 +152,12 @@ func TestCreateVrfIfNeededError(t *testing.T) { // Add/del table ctx.MockVpp.MockReply(&ip.IPTableAddDel{}) - err := vppcalls.CreateVrfIfNeeded(1, ctx.MockChannel) + err := ifHandler.CreateVrfIfNeeded(1) Expect(err).ToNot(BeNil()) } func TestCreateVrfIfNeededRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() // IP FIB dump @@ -171,6 +168,6 @@ func TestCreateVrfIfNeededRetval(t *testing.T) { Retval: 1, }) - err := vppcalls.CreateVrfIfNeeded(1, ctx.MockChannel) + err := ifHandler.CreateVrfIfNeeded(1) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls.go index 3a75c3d924..929b681899 100644 --- a/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls.go @@ -19,20 +19,17 @@ import ( "net" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vxlan" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) -func addDelVxlanTunnel(iface *intf.Interfaces_Interface_Vxlan, encVrf, multicastIf uint32, isAdd bool, vppChan govppapi.Channel, - stopwatch *measure.Stopwatch) (swIdx uint32, err error) { +func (handler *ifVppHandler) addDelVxlanTunnel(iface *intf.Interfaces_Interface_Vxlan, encVrf, multicastIf uint32, isAdd bool) (swIdx uint32, err error) { defer func(t time.Time) { - stopwatch.TimeLog(vxlan.VxlanAddDelTunnel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(vxlan.VxlanAddDelTunnel{}).LogTimeEntry(time.Since(t)) }(time.Now()) // this is temporary fix to solve creation of VRF table for VXLAN - if err := CreateVrfIfNeeded(encVrf, vppChan); err != nil { + if err := handler.CreateVrfIfNeeded(encVrf); err != nil { return 0, err } @@ -62,7 +59,7 @@ func addDelVxlanTunnel(iface *intf.Interfaces_Interface_Vxlan, encVrf, multicast req.DstAddress = []byte(dstAddr) reply := &vxlan.VxlanAddDelTunnelReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, err } if reply.Retval != 0 { @@ -72,19 +69,17 @@ func addDelVxlanTunnel(iface *intf.Interfaces_Interface_Vxlan, encVrf, multicast return reply.SwIfIndex, nil } -// AddVxlanTunnel calls AddDelVxlanTunnelReq with flag add=1. -func AddVxlanTunnel(ifName string, vxlanIntf *intf.Interfaces_Interface_Vxlan, encapVrf, multicastIf uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (swIndex uint32, err error) { - swIfIdx, err := addDelVxlanTunnel(vxlanIntf, encapVrf, multicastIf, true, vppChan, stopwatch) +func (handler *ifVppHandler) AddVxlanTunnel(ifName string, vxlanIntf *intf.Interfaces_Interface_Vxlan, encapVrf, multicastIf uint32) (swIndex uint32, err error) { + swIfIdx, err := handler.addDelVxlanTunnel(vxlanIntf, encapVrf, multicastIf, true) if err != nil { return 0, err } - return swIfIdx, SetInterfaceTag(ifName, swIfIdx, vppChan, stopwatch) + return swIfIdx, handler.SetInterfaceTag(ifName, swIfIdx) } -// DeleteVxlanTunnel calls AddDelVxlanTunnelReq with flag add=0. 
-func DeleteVxlanTunnel(ifName string, idx uint32, vxlanIntf *intf.Interfaces_Interface_Vxlan, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - if _, err := addDelVxlanTunnel(vxlanIntf, 0, 0, false, vppChan, stopwatch); err != nil { +func (handler *ifVppHandler) DeleteVxlanTunnel(ifName string, idx uint32, vxlanIntf *intf.Interfaces_Interface_Vxlan) error { + if _, err := handler.addDelVxlanTunnel(vxlanIntf, 0, 0, false); err != nil { return err } - return RemoveInterfaceTag(ifName, idx, vppChan, stopwatch) + return handler.RemoveInterfaceTag(ifName, idx) } diff --git a/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls_test.go index becccdcea6..8bbff943b6 100644 --- a/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/vxlan_vppcalls_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppcalls +package vppcalls_test import ( "net" @@ -23,12 +23,11 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vxlan" ifModel "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) func TestAddVxlanTunnel(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnelReply{ @@ -36,11 +35,11 @@ func TestAddVxlanTunnel(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + swIfIdx, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "20.0.0.1", Vni: 1, - }, 0, 2, ctx.MockChannel, nil) + }, 0, 2) Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) var msgCheck bool @@ -61,7 +60,7 @@ func TestAddVxlanTunnel(t *testing.T) { } func TestAddVxlanTunnelWithVrf(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() // VRF resolution @@ -74,11 +73,11 @@ func TestAddVxlanTunnelWithVrf(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + swIfIdx, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "20.0.0.1", Vni: 1, - }, 1, 1, ctx.MockChannel, nil) + }, 1, 1) Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) var msgCheck bool @@ -99,7 +98,7 @@ func TestAddVxlanTunnelWithVrf(t *testing.T) { } func TestAddVxlanTunnelIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnelReply{ @@ -107,11 +106,11 @@ func TestAddVxlanTunnelIPv6(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - swIfIdx, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + swIfIdx, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "2001:db8:0:1:1:1:1:1", DstAddress: "2002:db8:0:1:1:1:1:1", Vni: 1, - }, 0, 0, ctx.MockChannel, nil) + }, 0, 0) Expect(err).To(BeNil()) Expect(swIfIdx).To(BeEquivalentTo(1)) var msgCheck bool @@ -128,7 +127,7 @@ func TestAddVxlanTunnelIPv6(t *testing.T) { } func 
TestAddVxlanTunnelIPMismatch(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnelReply{ @@ -136,16 +135,16 @@ func TestAddVxlanTunnelIPMismatch(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + _, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "2001:db8:0:1:1:1:1:1", Vni: 1, - }, 0, 0, ctx.MockChannel, nil) + }, 0, 0) Expect(err).ToNot(BeNil()) } func TestAddVxlanTunnelInvalidIP(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnelReply{ @@ -153,31 +152,31 @@ func TestAddVxlanTunnelInvalidIP(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + _, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "invalid-ip", DstAddress: "2001:db8:0:1:1:1:1:1", Vni: 1, - }, 0, 0, ctx.MockChannel, nil) + }, 0, 0) Expect(err).ToNot(BeNil()) } func TestAddVxlanTunnelError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnel{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + _, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "20.0.0.2", Vni: 1, - }, 0, 0, ctx.MockChannel, nil) + }, 0, 0) Expect(err).ToNot(BeNil()) } func TestAddVxlanTunnelWithVrfError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() // VRF resolution @@ -190,16 +189,16 @@ func TestAddVxlanTunnelWithVrfError(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + _, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "20.0.0.1", Vni: 1, - }, 1, 0, ctx.MockChannel, nil) + }, 1, 0) Expect(err).ToNot(BeNil()) } func TestAddVxlanTunnelRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnelReply{ @@ -207,16 +206,16 @@ func TestAddVxlanTunnelRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - _, err := AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ + _, err := ifHandler.AddVxlanTunnel("ifName", &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "20.0.0.2", Vni: 1, - }, 0, 0, ctx.MockChannel, nil) + }, 0, 0) Expect(err).ToNot(BeNil()) } func TestDeleteVxlanTunnel(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnelReply{ @@ -224,31 +223,31 @@ func TestDeleteVxlanTunnel(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := DeleteVxlanTunnel("ifName", 1, &ifModel.Interfaces_Interface_Vxlan{ + err := ifHandler.DeleteVxlanTunnel("ifName", 1, &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", 
DstAddress: "20.0.0.1", Vni: 1, - }, ctx.MockChannel, nil) + }) Expect(err).To(BeNil()) } func TestDeleteVxlanTunnelError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnel{}) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := DeleteVxlanTunnel("ifName", 1, &ifModel.Interfaces_Interface_Vxlan{ + err := ifHandler.DeleteVxlanTunnel("ifName", 1, &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "20.0.0.1", Vni: 1, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } func TestDeleteVxlanTunnelRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler := ifTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&vxlan.VxlanAddDelTunnelReply{ @@ -256,10 +255,10 @@ func TestDeleteVxlanTunnelRetval(t *testing.T) { }) ctx.MockVpp.MockReply(&interfaces.SwInterfaceTagAddDelReply{}) - err := DeleteVxlanTunnel("ifName", 1, &ifModel.Interfaces_Interface_Vxlan{ + err := ifHandler.DeleteVxlanTunnel("ifName", 1, &ifModel.Interfaces_Interface_Vxlan{ SrcAddress: "10.0.0.1", DstAddress: "20.0.0.1", Vni: 1, - }, ctx.MockChannel, nil) + }) Expect(err).ToNot(BeNil()) } diff --git a/plugins/vpp/ifplugin/vppdump/doc.go b/plugins/vpp/ifplugin/vppdump/doc.go deleted file mode 100644 index 7d6d25c5c6..0000000000 --- a/plugins/vpp/ifplugin/vppdump/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package vppdump provides helpers for dumping all interfaces configured in VPP. -package vppdump diff --git a/plugins/vpp/ipsecplugin/ipsec_config.go b/plugins/vpp/ipsecplugin/ipsec_config.go index d05d091f49..ad4adaaffd 100644 --- a/plugins/vpp/ipsecplugin/ipsec_config.go +++ b/plugins/vpp/ipsecplugin/ipsec_config.go @@ -61,6 +61,9 @@ type IPSecConfigurator struct { // VPP channel vppCh govppapi.Channel + // VPP API handlers + ifHandler iface_vppcalls.IfVppAPI + // Timer used to measure and store time stopwatch *measure.Stopwatch } @@ -91,6 +94,9 @@ func (plugin *IPSecConfigurator) Init(logger logging.PluginLogger, goVppMux govp plugin.stopwatch = measure.NewStopwatch("IPSecConfigurator", plugin.log) } + // VPP API handlers + plugin.ifHandler = iface_vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch) + // Message compatibility if err = plugin.vppCh.CheckMessageCompatibility(vppcalls.IPSecMessages...); err != nil { plugin.log.Error(err) @@ -336,7 +342,7 @@ func (plugin *IPSecConfigurator) ConfigureTunnel(tunnel *ipsec.TunnelInterfaces_ plugin.ifIndexes.RegisterName(tunnel.Name, ifIdx, nil) plugin.log.Infof("Registered Tunnel %v (%d)", tunnel.Name, ifIdx) - if err := iface_vppcalls.SetInterfaceVRF(ifIdx, tunnel.Vrf, plugin.log, plugin.vppCh); err != nil { + if err := plugin.ifHandler.SetInterfaceVRF(ifIdx, tunnel.Vrf); err != nil { return err } @@ -345,14 +351,14 @@ func (plugin *IPSecConfigurator) ConfigureTunnel(tunnel *ipsec.TunnelInterfaces_ return err } for _, ip := range ipAddrs { - if err := iface_vppcalls.AddInterfaceIP(ifIdx, ip, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.AddInterfaceIP(ifIdx, ip); err != nil { plugin.log.Errorf("adding interface IP address failed: %v", err) return err } } if tunnel.Enabled { - if err := iface_vppcalls.InterfaceAdminUp(ifIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ifHandler.InterfaceAdminUp(ifIdx); err != nil { plugin.log.Debugf("setting interface up failed: %v", err) return err } diff --git a/plugins/vpp/l2plugin/bd_config.go 
b/plugins/vpp/l2plugin/bd_config.go
index 07d881a979..2ee69a20cc 100644
--- a/plugins/vpp/l2plugin/bd_config.go
+++ b/plugins/vpp/l2plugin/bd_config.go
@@ -31,6 +31,7 @@ import (
 "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx"
 "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls"
 "github.com/ligato/vpp-agent/plugins/vpp/model/l2"
+ ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls"
 )

 // BDConfigurator runs in the background in its own goroutine where it watches for any changes
@@ -52,6 +53,9 @@ type BDConfigurator struct {
 // State notification channel
 notificationChan chan BridgeDomainStateMessage // Injected, do not close here
+ // VPP API handlers
+ ifHandler ifvppcalls.IfVppAPI
+
 // Timer used to measure and store time
 stopwatch *measure.Stopwatch
 }
@@ -94,6 +98,9 @@ func (plugin *BDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmu
 plugin.stopwatch = measure.NewStopwatch("BDConfigurator", plugin.log)
 }

+ // VPP API handlers
+ plugin.ifHandler = ifvppcalls.NewIfVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch)
+
 // Message compatibility
 err = plugin.vppChan.CheckMessageCompatibility(vppcalls.BridgeDomainMessages...)
 if err != nil {
diff --git a/plugins/vpp/l2plugin/data_resync.go b/plugins/vpp/l2plugin/data_resync.go
index bfdfce6200..37497448a9 100644
--- a/plugins/vpp/l2plugin/data_resync.go
+++ b/plugins/vpp/l2plugin/data_resync.go
@@ -17,7 +17,6 @@ package l2plugin
 import (
 "strings"
- if_dump "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppdump"
 "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx"
 "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls"
 "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppdump"
@@ -99,7 +98,7 @@ func (plugin *BDConfigurator) Resync(nbBDs []*l2.BridgeDomains_BridgeDomain) err
 // todo currently it is not possible to dump interfaces. In order to prevent BD removal, unset all available interfaces
 // Dump all interfaces
- interfaceMap, err := if_dump.DumpInterfaces(plugin.log, plugin.vppChan, nil)
+ interfaceMap, err := plugin.ifHandler.DumpInterfaces()
 if err != nil {
 plugin.log.Error(err)
 wasErr = err
diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go
index abe3a59c20..434506c04c 100644
--- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go
+++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go
@@ -24,6 +24,7 @@ import (
 "github.com/ligato/cn-infra/utils/addrs"
 "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip"
 ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls"
+ "github.com/ligato/cn-infra/logging/logrus"
 )

 var RouteMessages = []govppapi.Message{
@@ -135,7 +136,7 @@ func vppAddDelRoute(route *Route, vppChan govppapi.Channel, delete bool, stopwat
 // VppAddRoute adds a new route, according to provided input. Every route has to contain VRF ID (default is 0).
func VppAddRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - if err := ifvppcalls.CreateVrfIfNeeded(route.VrfID, vppChan); err != nil { + ifHandler := ifvppcalls.NewIfVppHandler(vppChan, logrus.DefaultLogger(), stopwatch) // TODO temp change + if err := ifHandler.CreateVrfIfNeeded(route.VrfID); err != nil { return err } if route.Type == InterVrf { diff --git a/plugins/vpp/plugin_impl_vpp.go b/plugins/vpp/plugin_impl_vpp.go index 22cfdd1057..1ca2982d85 100644 --- a/plugins/vpp/plugin_impl_vpp.go +++ b/plugins/vpp/plugin_impl_vpp.go @@ -422,7 +422,7 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // Interface configurator plugin.ifVppNotifChan = make(chan govppapi.Message, 100) plugin.ifConfigurator = &ifplugin.InterfaceConfigurator{} - if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, plugin.enableStopwatch); err != nil { + if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, nil); err != nil { return err } plugin.Log.Debug("ifConfigurator Initialized") @@ -447,21 +447,21 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // BFD configurator plugin.bfdConfigurator = &ifplugin.BFDConfigurator{} - if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, nil); err != nil { return err } plugin.Log.Debug("bfdConfigurator Initialized") // STN configurator plugin.stnConfigurator = &ifplugin.StnConfigurator{} - if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, nil); err != nil { return err } plugin.Log.Debug("stnConfigurator Initialized") // NAT configurator plugin.natConfigurator = &ifplugin.NatConfigurator{} - if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { + if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, nil); err != nil { return err } plugin.Log.Debug("natConfigurator Initialized") From a95e9fc2ab0bcb43330c0b67afbcfa18249c0823 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 23 Jul 2018 10:14:24 +0200 Subject: [PATCH 031/174] interface plugin vppcalls api Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 4 +- plugins/vpp/aclplugin/acl_config.go | 4 +- plugins/vpp/aclplugin/acl_config_test.go | 4 +- .../vpp/aclplugin/vppcalls/api_vppcalls.go | 2 + plugins/vpp/ifplugin/afpacket_config.go | 1 - plugins/vpp/ifplugin/bfd_config.go | 10 +- plugins/vpp/ifplugin/bfd_config_test.go | 7 +- plugins/vpp/ifplugin/data_resync_test.go | 9 +- plugins/vpp/ifplugin/interface_config.go | 8 +- plugins/vpp/ifplugin/interface_config_test.go | 7 +- plugins/vpp/ifplugin/nat_config.go | 8 +- plugins/vpp/ifplugin/nat_config_test.go | 7 +- plugins/vpp/ifplugin/stn_config.go | 10 +- plugins/vpp/ifplugin/stn_config_test.go | 7 +- .../vpp/ifplugin/vppcalls/admin_vppcalls.go | 9 +- .../ifplugin/vppcalls/afpacket_vppcalls.go | 2 +- plugins/vpp/ifplugin/vppcalls/api_vppcalls.go | 107 ++++++++++++------ plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go | 3 - .../vppcalls/dump_interface_vppcalls.go | 12 +- .../vppcalls/ip_container_vppcalls.go | 2 - 
plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go | 2 -
 plugins/vpp/l2plugin/bd_config.go | 2 +-
 .../l3plugin/vppcalls/arp_vppcalls_test.go | 2 +-
 .../vpp/l3plugin/vppcalls/route_vppcalls.go | 4 +-
 plugins/vpp/plugin_impl_vpp.go | 8 +-
 25 files changed, 137 insertions(+), 104 deletions(-)

diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go
index f0ecac86ed..dc278b96f5 100644
--- a/plugins/rest/rest_handlers.go
+++ b/plugins/rest/rest_handlers.go
@@ -30,7 +30,7 @@ import (
 "github.com/unrolled/render"
 aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls"
- ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppdump"
+ ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls"
 l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppdump"
 l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppdump"
 "github.com/ligato/vpp-agent/plugins/vpp/model/acl"
@@ -51,7 +51,7 @@ func (plugin *Plugin) interfacesGetHandler(formatter *render.Render) http.Handle
 }
 defer ch.Close()
- ifHandler := ifcalls.NewIfVppHandler(ch, plugin.Log, nil)
+ ifHandler := ifplugin.NewIfVppHandler(ch, plugin.Log, nil)
 res, err := ifHandler.DumpInterfaces()
 if err != nil {
 plugin.Log.Errorf("Error: %v", err)
diff --git a/plugins/vpp/aclplugin/acl_config.go b/plugins/vpp/aclplugin/acl_config.go
index ec26533892..f1040ed5bc 100644
--- a/plugins/vpp/aclplugin/acl_config.go
+++ b/plugins/vpp/aclplugin/acl_config.go
@@ -97,7 +97,9 @@ func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppm
 }
 // Configurator-wide stopwatch instance
- plugin.stopwatch = measure.NewStopwatch("ACL-configurator", plugin.log)
+ if enableStopwatch {
+ plugin.stopwatch = measure.NewStopwatch("ACL-configurator", plugin.log)
+ }
 // ACL binary api handler
 plugin.aclHandler = vppcalls.NewAclVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.stopwatch)
diff --git a/plugins/vpp/aclplugin/acl_config_test.go b/plugins/vpp/aclplugin/acl_config_test.go
index cd716b5cf2..ca3b6e9edb 100644
--- a/plugins/vpp/aclplugin/acl_config_test.go
+++ b/plugins/vpp/aclplugin/acl_config_test.go
@@ -87,7 +87,7 @@ func TestAclConfiguratorInit(t *testing.T) {
 ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{})
 // Test init
- err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, false)
+ err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, true)
 Expect(err).To(BeNil())
 err = plugin.Close()
 Expect(err).To(BeNil())
@@ -446,7 +446,7 @@ func aclTestSetup(t *testing.T, createIfs bool) (*vppcallmock.TestCtx, *core.Con
 // Configurator
 ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{1, 0})
 plugin := &aclplugin.ACLConfigurator{}
- err = plugin.Init(log, connection, ifIndexes, false)
+ err = plugin.Init(log, connection, ifIndexes, true)
 Expect(err).To(BeNil())
 return ctx, connection, plugin
diff --git a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go
index e137c03f1c..36803cb291 100644
--- a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go
+++ b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go
@@ -28,6 +28,7 @@ type AclVppAPI interface {
 AclVppRead
 }

+// AclVppWrite provides write methods for ACL plugin
 type AclVppWrite interface {
 // AddIPAcl creates a new L3/4 ACL. Input index == 0xffffffff, VPP provides index in reply.
AddIPAcl(rules []*acl.AccessLists_Acl_Rule, aclName string) (uint32, error) @@ -55,6 +56,7 @@ type AclVppWrite interface { RemoveMacIPIngressACLFromInterfaces(removedACLIndex uint32, ifIndices []uint32) error } +// AclVppRead provides read methods for ACL plugin type AclVppRead interface { // GetAclPluginVersion returns version of the VPP ACL plugin GetAclPluginVersion() (string, error) diff --git a/plugins/vpp/ifplugin/afpacket_config.go b/plugins/vpp/ifplugin/afpacket_config.go index ec265825ff..52a2b106aa 100644 --- a/plugins/vpp/ifplugin/afpacket_config.go +++ b/plugins/vpp/ifplugin/afpacket_config.go @@ -17,7 +17,6 @@ package ifplugin import ( "errors" - govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" diff --git a/plugins/vpp/ifplugin/bfd_config.go b/plugins/vpp/ifplugin/bfd_config.go index 9dbe4a497d..1a83350fe2 100644 --- a/plugins/vpp/ifplugin/bfd_config.go +++ b/plugins/vpp/ifplugin/bfd_config.go @@ -50,7 +50,7 @@ type BFDConfigurator struct { keysIndexes idxvpp.NameToIdxRW echoFunctionIndex idxvpp.NameToIdxRW - vppChan govppapi.VPPChannel + vppChan govppapi.Channel // VPP API handler bfdHandler vppcalls.BfdVppAPI @@ -58,13 +58,15 @@ type BFDConfigurator struct { // Init members and channels func (plugin *BFDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, - stopwatch *measure.Stopwatch) (err error) { + enableStopwatch bool) (err error) { // Logger plugin.log = logger.NewLogger("-bfd-conf") plugin.log.Infof("Initializing BFD configurator") - // Stopwatch - plugin.stopwatch = stopwatch + // Configurator-wide stopwatch instance + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("BFD-configurator", plugin.log) + } // Mappings plugin.ifIndexes = swIfIndexes diff --git a/plugins/vpp/ifplugin/bfd_config_test.go b/plugins/vpp/ifplugin/bfd_config_test.go index 9dace02759..3d1372e74a 100644 --- a/plugins/vpp/ifplugin/bfd_config_test.go +++ b/plugins/vpp/ifplugin/bfd_config_test.go @@ -23,7 +23,6 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" @@ -44,9 +43,8 @@ func TestBfdConfiguratorInit(t *testing.T) { defer connection.Disconnect() plugin := &ifplugin.BFDConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, stopwatch) + nil, true) Expect(err).To(BeNil()) err = plugin.Close() @@ -577,8 +575,7 @@ func bfdTestSetup(t *testing.T) (*vppcallmock.TestCtx, *core.Connection, *ifplug swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "stn", nil)) // Configurator plugin := &ifplugin.BFDConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - err = plugin.Init(log, connection, swIfIndices, stopwatch) + err = plugin.Init(log, connection, swIfIndices, true) Expect(err).To(BeNil()) return ctx, connection, plugin, swIfIndices diff --git a/plugins/vpp/ifplugin/data_resync_test.go b/plugins/vpp/ifplugin/data_resync_test.go index 4e5c25acde..25e65eb827 100644 --- a/plugins/vpp/ifplugin/data_resync_test.go +++ 
b/plugins/vpp/ifplugin/data_resync_test.go @@ -22,7 +22,6 @@ import ( govpp "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" bfdApi "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" @@ -146,7 +145,7 @@ func interfaceConfiguratorTestInitialization(t *testing.T, mocks []*vppReplyMock ifVppNotifCh := make(chan govppapi.Message, 100) plugLog := logging.ForPlugin("tests", logrus.NewLogRegistry()) - err = plugin.Init(plugLog, conn, nil, ifVppNotifCh, 0, false) + err = plugin.Init(plugLog, conn, nil, ifVppNotifCh, 0, true) Expect(err).To(BeNil()) return plugin, conn @@ -179,7 +178,7 @@ func bfdConfiguratorTestInitialization(t *testing.T, mocks []*vppReplyMock) (*if logrus.NewLogRegistry()), connection, index, - measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger())) + true) Expect(err).To(BeNil()) @@ -213,7 +212,7 @@ func stnConfiguratorTestInitialization(t *testing.T, mocks []*vppReplyMock) (*if logrus.NewLogRegistry()), connection, index, - measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger())) + true) Expect(err).To(BeNil()) return plugin, connection @@ -246,7 +245,7 @@ func natConfiguratorTestInitialization(t *testing.T, mocks []*vppReplyMock) (*if logrus.NewLogRegistry()), connection, index, - measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger())) + true) Expect(err).To(BeNil()) return plugin, index, connection diff --git a/plugins/vpp/ifplugin/interface_config.go b/plugins/vpp/ifplugin/interface_config.go index b9a9b8decd..814d1d91af 100644 --- a/plugins/vpp/ifplugin/interface_config.go +++ b/plugins/vpp/ifplugin/interface_config.go @@ -75,13 +75,15 @@ type InterfaceConfigurator struct { // Init members (channels...) 
and start go routines func (plugin *InterfaceConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, linux interface{}, - notifChan chan govppapi.Message, defaultMtu uint32, stopwatch *measure.Stopwatch) (err error) { + notifChan chan govppapi.Message, defaultMtu uint32, enableStopwatch bool) (err error) { // Logger plugin.log = logger.NewLogger("-if-conf") plugin.log.Debug("Initializing Interface configurator") - // Stopwatch instance - plugin.stopwatch = stopwatch + // Configurator-wide stopwatch instance + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("Interface-configurator", plugin.log) + } // State notification channel plugin.NotifChan = notifChan diff --git a/plugins/vpp/ifplugin/interface_config_test.go b/plugins/vpp/ifplugin/interface_config_test.go index 76d3d3bf54..a877faf54b 100644 --- a/plugins/vpp/ifplugin/interface_config_test.go +++ b/plugins/vpp/ifplugin/interface_config_test.go @@ -24,7 +24,6 @@ import ( govpp "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" dhcp_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/dhcp" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" @@ -61,9 +60,8 @@ func TestInterfaceConfiguratorInit(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) // Test init - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err = plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, ifVppNotifChan, 0, stopwatch) + nil, ifVppNotifChan, 0, true) Expect(err).To(BeNil()) Expect(plugin.IsSocketFilenameCached("test-socket-filename")).To(BeTrue()) // Test close @@ -1435,8 +1433,7 @@ func ifTestSetup(t *testing.T) (*vppcallmock.TestCtx, *govpp.Connection, *ifplug // Configurator plugin := &ifplugin.InterfaceConfigurator{} notifChan := make(chan govppapi.Message, 5) - stopwatch := measure.NewStopwatch("test-stopwatch", log) - err = plugin.Init(log, connection, 1, notifChan, 1500, stopwatch) + err = plugin.Init(log, connection, 1, notifChan, 1500, true) Expect(err).To(BeNil()) return ctx, connection, plugin diff --git a/plugins/vpp/ifplugin/nat_config.go b/plugins/vpp/ifplugin/nat_config.go index 984cf4843f..ecac65e5f0 100644 --- a/plugins/vpp/ifplugin/nat_config.go +++ b/plugins/vpp/ifplugin/nat_config.go @@ -86,13 +86,15 @@ type NatConfigurator struct { // Init NAT configurator func (plugin *NatConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, ifIndexes ifaceidx.SwIfIndex, - stopwatch *measure.Stopwatch) (err error) { + enableStopwatch bool) (err error) { // Logger plugin.log = logger.NewLogger("-nat-conf") plugin.log.Debug("Initializing NAT configurator") - // Stopwatch - plugin.stopwatch = stopwatch + // Configurator-wide stopwatch instance + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("NAT-configurator", plugin.log) + } // Mappings plugin.ifIndexes = ifIndexes diff --git a/plugins/vpp/ifplugin/nat_config_test.go b/plugins/vpp/ifplugin/nat_config_test.go index 54e55a2f33..20c70753d4 100644 --- a/plugins/vpp/ifplugin/nat_config_test.go +++ b/plugins/vpp/ifplugin/nat_config_test.go @@ -21,7 +21,6 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" nat_api 
"github.com/ligato/vpp-agent/plugins/vpp/binapi/nat" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" @@ -40,9 +39,8 @@ func TestNatConfiguratorInit(t *testing.T) { defer connection.Disconnect() plugin := &ifplugin.NatConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, stopwatch) + nil, true) Expect(err).To(BeNil()) err = plugin.Close() @@ -1225,8 +1223,7 @@ func natTestSetup(t *testing.T) (*vppcallmock.TestCtx, *core.Connection, *ifplug swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "nat", nil)) // Configurator plugin := &ifplugin.NatConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", log) - err = plugin.Init(log, connection, swIfIndices, stopwatch) + err = plugin.Init(log, connection, swIfIndices, true) Expect(err).To(BeNil()) return ctx, connection, plugin, swIfIndices diff --git a/plugins/vpp/ifplugin/stn_config.go b/plugins/vpp/ifplugin/stn_config.go index c641929d77..2577d800d3 100644 --- a/plugins/vpp/ifplugin/stn_config.go +++ b/plugins/vpp/ifplugin/stn_config.go @@ -46,7 +46,7 @@ type StnConfigurator struct { unstoredIndexes idxvpp.NameToIdxRW unstoredIndexSeq uint32 // VPP - vppChan govppapi.VPPChannel + vppChan govppapi.Channel // VPP API handler stnHandler vppcalls.StnVppAPI // Stopwatch @@ -67,13 +67,15 @@ func (plugin *StnConfigurator) UnstoredIndexExistsFor(name string) bool { // Init initializes STN configurator func (plugin *StnConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, ifIndexes ifaceidx.SwIfIndex, - stopwatch *measure.Stopwatch) (err error) { + enableStopwatch bool) (err error) { // Init logger plugin.log = logger.NewLogger("-stn-conf") plugin.log.Debug("Initializing STN configurator") - // Stopwatch - plugin.stopwatch = stopwatch + // Configurator-wide stopwatch instance + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("STN-configurator", plugin.log) + } // Init VPP API channel plugin.vppChan, err = goVppMux.NewAPIChannel() diff --git a/plugins/vpp/ifplugin/stn_config_test.go b/plugins/vpp/ifplugin/stn_config_test.go index 909ca35a98..bda1b027ff 100644 --- a/plugins/vpp/ifplugin/stn_config_test.go +++ b/plugins/vpp/ifplugin/stn_config_test.go @@ -22,7 +22,6 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp/nametoidx" stn_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" @@ -42,9 +41,8 @@ func TestStnConfiguratorInit(t *testing.T) { defer connection.Disconnect() plugin := &ifplugin.StnConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, - nil, stopwatch) + nil, true) Expect(err).To(BeNil()) err = plugin.Close() @@ -423,8 +421,7 @@ func stnTestSetup(t *testing.T) (*vppcallmock.TestCtx, *core.Connection, *ifplug swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "stn", nil)) // Configurator plugin := &ifplugin.StnConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", log) - err = plugin.Init(log, connection, swIfIndices, stopwatch) + err = plugin.Init(log, connection, swIfIndices, true) Expect(err).To(BeNil()) return ctx, connection, plugin, swIfIndices diff --git 
a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go
index 3c7ccc6ea8..88016a4b16 100644
--- a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go
+++ b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls.go
@@ -18,8 +18,6 @@ import (
 "fmt"
 "time"
- govppapi "git.fd.io/govpp.git/api"
- "github.com/ligato/cn-infra/logging/measure"
 "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces"
 )
@@ -89,3 +87,10 @@ func (handler *ifVppHandler) handleInterfaceTag(tag string, ifIdx uint32, isAdd
 return nil
 }
+
+func boolToUint(input bool) uint8 {
+ if input {
+ return 1
+ }
+ return 0
+}
diff --git a/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go
index d190983d49..258b0ced63 100644
--- a/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go
+++ b/plugins/vpp/ifplugin/vppcalls/afpacket_vppcalls.go
@@ -18,8 +18,8 @@ import (
 "fmt"
 "time"
- govppapi "git.fd.io/govpp.git/api"
 "net"
+
 "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet"
 intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces"
 )
diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go
index 46799d9163..affa45c1bb 100644
--- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go
+++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go
@@ -17,19 +17,28 @@ package vppcalls
 import (
 "net"
+ "git.fd.io/govpp.git/api"
 "github.com/ligato/cn-infra/logging"
 "github.com/ligato/cn-infra/logging/measure"
 "github.com/ligato/vpp-agent/idxvpp"
 bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd"
+ "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn"
 "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx"
 "github.com/ligato/vpp-agent/plugins/vpp/model/bfd"
 "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces"
 "github.com/ligato/vpp-agent/plugins/vpp/model/nat"
- "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn"
 )

// IfVppAPI provides methods for creating and managing interfaces
type IfVppAPI interface {
+ IfVppWrite
+ IfVppRead
+ // CheckMsgCompatibilityForInterface checks if interface CRCs are compatible with VPP in runtime.
+ CheckMsgCompatibilityForInterface() error
+}
+
+// IfVppWrite provides write methods for interface plugin
+type IfVppWrite interface {
 // AddAfPacketInterface calls AfPacketCreate VPP binary API.
 AddAfPacketInterface(ifName string, hwAddr string, afPacketIntf *interfaces.Interfaces_Interface_Afpacket) (swIndex uint32, err error)
 // DeleteAfPacketInterface calls AfPacketDelete VPP binary API.
@@ -50,13 +59,6 @@ type IfVppAPI interface {
 AddVxlanTunnel(ifName string, vxlanIntf *interfaces.Interfaces_Interface_Vxlan, encapVrf, multicastIf uint32) (swIndex uint32, err error)
 // DeleteVxlanTunnel calls AddDelVxlanTunnelReq with flag add=0.
 DeleteVxlanTunnel(ifName string, idx uint32, vxlanIntf *interfaces.Interfaces_Interface_Vxlan) error
- // DumpInterfaces dumps VPP interface data into the northbound API data structure
- // map indexed by software interface index.
- //
- // LIMITATIONS:
- // - there is no af_packet dump binary API. We relay on naming conventions of the internal VPP interface names
- // - ip.IPAddressDetails has wrong internal structure, as a workaround we need to handle them as notifications
- DumpInterfaces() (map[uint32]*Interface, error)
 // InterfaceAdminDown calls binary API SwInterfaceSetFlagsReply with AdminUpDown=0.
 InterfaceAdminDown(ifIdx uint32) error
 // InterfaceAdminUp calls binary API SwInterfaceSetFlagsReply with AdminUpDown=1.
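Note on the split performed in the hunks above and below: IfVppAPI becomes a composition of IfVppWrite and IfVppRead, so a consumer can depend on only the half it actually needs. A minimal sketch of a dump-only consumer built against the read half (illustrative only, not part of the patch; the package, type, and method names here are hypothetical, while IfVppRead and DumpInterfaces come from the patch):

package example

import (
	ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls"
)

// dumpOnlyClient depends on IfVppRead alone, so it cannot invoke any
// mutating vppcalls method by construction, and tests only have to mock
// the read interface.
type dumpOnlyClient struct {
	reader ifvppcalls.IfVppRead
}

// interfaceCount dumps all interfaces and reports how many VPP knows about.
func (c *dumpOnlyClient) interfaceCount() (int, error) {
	ifs, err := c.reader.DumpInterfaces() // map keyed by software interface index
	if err != nil {
		return 0, err
	}
	return len(ifs), nil
}
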
@@ -91,20 +93,37 @@ type IfVppAPI interface {
 SetRxMode(ifIdx uint32, rxModeSettings *interfaces.Interfaces_Interface_RxModeSettings) error
 // SetRxPlacement configures rx-placement for interface
 SetRxPlacement(vppInternalName string, rxPlacement *interfaces.Interfaces_Interface_RxPlacementSettings) error
- // GetInterfaceVRF assigns VRF table to interface
- GetInterfaceVRF(ifIdx uint32) (vrfID uint32, err error)
 // SetInterfaceVRF assigns VRF table to interface
 SetInterfaceVRF(ifaceIndex, vrfID uint32) error
 // CreateVrfIfNeeded checks if VRF exists and creates it if not
 CreateVrfIfNeeded(vrfID uint32) error
+}
+
+// IfVppRead provides read methods for interface plugin
+type IfVppRead interface {
+ // DumpInterfaces dumps VPP interface data into the northbound API data structure
+ // map indexed by software interface index.
+ //
+ // LIMITATIONS:
+ // - there is no af_packet dump binary API. We rely on naming conventions of the internal VPP interface names
+ // - ip.IPAddressDetails has wrong internal structure, as a workaround we need to handle them as notifications
+ DumpInterfaces() (map[uint32]*Interface, error)
+ // GetInterfaceVRF retrieves VRF table from interface
+ GetInterfaceVRF(ifIdx uint32) (vrfID uint32, err error)
 // DumpMemifSocketDetails dumps memif socket details from the VPP
 DumpMemifSocketDetails() (map[string]uint32, error)
- // CheckMsgCompatibilityForInterface checks if interface CRSs are compatible with VPP in runtime.
- CheckMsgCompatibilityForInterface() error
 }

 // BfdVppAPI provides methods for managing BFD
 type BfdVppAPI interface {
+ BfdVppWrite
+ BfdVppRead
+ // CheckMsgCompatibilityForBfd checks if bfd CRCs are compatible with VPP in runtime.
+ CheckMsgCompatibilityForBfd() error
+}
+
+// BfdVppWrite provides write methods for BFD
+type BfdVppWrite interface {
 // AddBfdUDPSession adds new BFD session with authentication if available.
 AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, ifIdx uint32, bfdKeyIndexes idxvpp.NameToIdx) error
 // AddBfdUDPSessionFromDetails adds new BFD session with authentication if available.
@@ -113,26 +132,36 @@ type BfdVppAPI interface {
 ModifyBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session, swIfIndexes ifaceidx.SwIfIndex) error
 // DeleteBfdUDPSession removes an existing BFD session.
 DeleteBfdUDPSession(ifIndex uint32, sourceAddress string, destAddress string) error
- // DumpBfdUDPSessions returns a list of BFD session's metadata
- DumpBfdUDPSessions() ([]*bfd_api.BfdUDPSessionDetails, error)
- // DumpBfdUDPSessionsWithID returns a list of BFD session's metadata filtered according to provided authentication key
- DumpBfdUDPSessionsWithID(authKeyIndex uint32) ([]*bfd_api.BfdUDPSessionDetails, error)
 // SetBfdUDPAuthenticationKey creates new authentication key.
 SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key) error
 // DeleteBfdUDPAuthenticationKey removes the authentication key.
 DeleteBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key) error
- // DumpBfdKeys looks up all BFD auth keys and saves their name-to-index mapping
- DumpBfdKeys() (keys []*bfd_api.BfdAuthKeysDetails, err error)
 // AddBfdEchoFunction sets up an echo function for the interface.
 AddBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction, swIfIndexes ifaceidx.SwIfIndex) error
 // DeleteBfdEchoFunction removes an echo function.
 DeleteBfdEchoFunction() error
- // CheckMsgCompatibilityForBfd checks if bfd CRSs are compatible with VPP in runtime.
- CheckMsgCompatibilityForBfd() error
+}
+
+// BfdVppRead provides read methods for BFD
+type BfdVppRead interface {
+ // DumpBfdUDPSessions returns a list of BFD session's metadata
+ DumpBfdUDPSessions() ([]*bfd_api.BfdUDPSessionDetails, error)
+ // DumpBfdUDPSessionsWithID returns a list of BFD session's metadata filtered according to provided authentication key
+ DumpBfdUDPSessionsWithID(authKeyIndex uint32) ([]*bfd_api.BfdUDPSessionDetails, error)
+ // DumpBfdKeys looks up all BFD auth keys and saves their name-to-index mapping
+ DumpBfdKeys() (keys []*bfd_api.BfdAuthKeysDetails, err error)
 }

 // NatVppAPI provides methods for managing NAT
 type NatVppAPI interface {
+ NatVppWrite
+ NatVppRead
+ // CheckMsgCompatibilityForNat verifies compatibility of used binary API calls
+ CheckMsgCompatibilityForNat() error
+}
+
+// NatVppWrite provides write methods for NAT
+type NatVppWrite interface {
 // SetNat44Forwarding configures global forwarding setup for NAT44
 SetNat44Forwarding(enableFwd bool) error
 // EnableNat44Interface enables NAT feature for provided interface
@@ -160,58 +189,70 @@ type NatVppAPI interface {
 AddNat44StaticMappingLb(ctx *StaticMappingLbContext) error
 // DelNat44StaticMappingLb removes existing static mapping entry with load balancer
 DelNat44StaticMappingLb(ctx *StaticMappingLbContext) error
+}
+
+// NatVppRead provides read methods for NAT
+type NatVppRead interface {
 // Nat44GlobalConfigDump returns global config in NB format
 Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44Global, error)
 // NAT44DNatDump dumps all types of mappings, sorts them according to tag (DNAT label) and creates a set of DNAT configurations
 NAT44DNatDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44DNat, error)
 // Nat44InterfaceDump returns a list of interfaces enabled for NAT44
 Nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex) (interfaces []*nat.Nat44Global_NatInterface, err error)
- // CheckMsgCompatibilityForNat verifies compatibility of used binary API calls
- CheckMsgCompatibilityForNat() error
 }

 // StnVppAPI provides methods for managing STN
 type StnVppAPI interface {
+ StnVppWrite
+ StnVppRead
+ // CheckMsgCompatibilityForStn verifies compatibility of used binary API calls
+ CheckMsgCompatibilityForStn() error
+}
+
+// StnVppWrite provides write methods for STN
+type StnVppWrite interface {
 // AddStnRule calls StnAddDelRule bin API with IsAdd=1
 AddStnRule(ifIdx uint32, addr *net.IP) error
 // DelStnRule calls StnAddDelRule bin API with IsAdd=0
 DelStnRule(ifIdx uint32, addr *net.IP) error
+}
+
+// StnVppRead provides read methods for STN
+type StnVppRead interface {
 // DumpStnRules returns a list of all STN rules configured on the VPP
 DumpStnRules() (rules []*stn.StnRulesDetails, err error)
- // CheckMsgCompatibilityForStn verifies compatibility of used binary API calls
- CheckMsgCompatibilityForStn() error
 }

 // ifVppHandler is accessor for interface-related vppcalls methods
 type ifVppHandler struct {
 stopwatch *measure.Stopwatch
- callsChannel VPPChannel
+ callsChannel api.Channel
 log logging.Logger
 }

 // bfdVppHandler is accessor for BFD-related vppcalls methods
 type bfdVppHandler struct {
 stopwatch *measure.Stopwatch
- callsChannel VPPChannel
+ callsChannel api.Channel
 log logging.Logger
 }

 // natVppHandler is accessor for NAT-related vppcalls methods
 type natVppHandler struct {
 stopwatch *measure.Stopwatch
- callsChannel VPPChannel
- dumpChannel VPPChannel
+ callsChannel api.Channel
+ dumpChannel api.Channel
 log logging.Logger
 }

 // stnVppHandler is accessor for STN-related
vppcalls methods type stnVppHandler struct { stopwatch *measure.Stopwatch - callsChannel VPPChannel + callsChannel api.Channel } // NewIfVppHandler creates new instance of interface vppcalls handler -func NewIfVppHandler(callsChan VPPChannel, log logging.Logger, stopwatch *measure.Stopwatch) *ifVppHandler { +func NewIfVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) *ifVppHandler { return &ifVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, @@ -220,7 +261,7 @@ func NewIfVppHandler(callsChan VPPChannel, log logging.Logger, stopwatch *measur } // NewBfdVppHandler creates new instance of BFD vppcalls handler -func NewBfdVppHandler(callsChan VPPChannel, log logging.Logger, stopwatch *measure.Stopwatch) *bfdVppHandler { +func NewBfdVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) *bfdVppHandler { return &bfdVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, @@ -229,7 +270,7 @@ func NewBfdVppHandler(callsChan VPPChannel, log logging.Logger, stopwatch *measu } // NewNatVppHandler creates new instance of NAT vppcalls handler -func NewNatVppHandler(callsChan, dumpChan VPPChannel, log logging.Logger, stopwatch *measure.Stopwatch) *natVppHandler { +func NewNatVppHandler(callsChan, dumpChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) *natVppHandler { return &natVppHandler{ callsChannel: callsChan, dumpChannel: dumpChan, @@ -239,7 +280,7 @@ func NewNatVppHandler(callsChan, dumpChan VPPChannel, log logging.Logger, stopwa } // NewStnVppHandler creates new instance of STN vppcalls handler -func NewStnVppHandler(callsChan VPPChannel, stopwatch *measure.Stopwatch) *stnVppHandler { +func NewStnVppHandler(callsChan api.Channel, stopwatch *measure.Stopwatch) *stnVppHandler { return &stnVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go index 0622d8c647..fc6cbb6b02 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go @@ -19,9 +19,6 @@ import ( "net" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/idxvpp" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" diff --git a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go index 58b819779e..8a9bb8c515 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go @@ -22,7 +22,6 @@ import ( "time" "git.fd.io/govpp.git/api" - govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" @@ -83,14 +82,11 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*Interface, error) { ifs[ifDetails.SwIfIndex] = iface if iface.Type == ifnb.InterfaceType_AF_PACKET_INTERFACE { - err := handler.dumpAFPacketDetails(ifs, ifDetails.SwIfIndex, iface.VPPInternalName) - if err != nil { - return nil, err - } + fillAFPacketDetails(ifs, ifDetails.SwIfIndex, iface.VPPInternalName) } } - log.Debugf("dumped %d interfaces", len(ifs)) + handler.log.Debugf("dumped %d interfaces", len(ifs)) // SwInterfaceDump time timeLog := measure.GetTimeLog(interfaces.SwInterfaceDump{}, handler.stopwatch) @@ 
-222,8 +218,8 @@ func (handler *ifVppHandler) processIPDetails(ifs map[uint32]*Interface, ipDetai ifs[ipDetails.SwIfIndex].IpAddresses = append(ifs[ipDetails.SwIfIndex].IpAddresses, ipAddr) } -// dumpAFPacketDetails fills af_packet interface details into the provided interface map. -func (handler *ifVppHandler) dumpAFPacketDetails(ifs map[uint32]*Interface, swIfIndex uint32, ifName string) error { +// fillAFPacketDetails fills af_packet interface details into the provided interface map. +func fillAFPacketDetails(ifs map[uint32]*Interface, swIfIndex uint32, ifName string) { ifs[swIfIndex].Afpacket = &ifnb.Interfaces_Interface_Afpacket{ HostIfName: strings.TrimPrefix(ifName, "host-"), } diff --git a/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go index c4991b5880..fa01d28e2b 100644 --- a/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/ip_container_vppcalls.go @@ -18,8 +18,6 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" ) diff --git a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go index 4b090ff3d4..11e654ea69 100644 --- a/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/vrf_vppcalls.go @@ -17,8 +17,6 @@ package vppcalls import ( "fmt" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" diff --git a/plugins/vpp/l2plugin/bd_config.go b/plugins/vpp/l2plugin/bd_config.go index 2ee69a20cc..d908c45cd6 100644 --- a/plugins/vpp/l2plugin/bd_config.go +++ b/plugins/vpp/l2plugin/bd_config.go @@ -28,10 +28,10 @@ import ( "github.com/ligato/vpp-agent/plugins/govppmux" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx" "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ) // BDConfigurator runs in the background in its own goroutine where it watches for any changes diff --git a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go index d2ec55799f..b12b63a8cd 100644 --- a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go +++ b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go @@ -32,7 +32,7 @@ var arpEntries = []vppcalls.ArpEntry{ { Interface: 1, IPAddress: []byte{192, 168, 10, 22}, - MacAddress:"6C:45:59:59:8E:BD", + MacAddress: "6C:45:59:59:8E:BD", Static: false, }, { diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go index 434506c04c..57300fb08d 100644 --- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go @@ -20,11 +20,11 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" + "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" ifvppcalls 
"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/cn-infra/logging/logrus" ) var RouteMessages = []govppapi.Message{ @@ -141,7 +141,7 @@ func VppAddRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stop return err } if route.Type == InterVrf { - if err := ifvppcalls.CreateVrfIfNeeded(route.ViaVrfId, vppChan); err != nil { + if err := ifHandler.CreateVrfIfNeeded(route.ViaVrfId); err != nil { return err } } diff --git a/plugins/vpp/plugin_impl_vpp.go b/plugins/vpp/plugin_impl_vpp.go index 1ca2982d85..22cfdd1057 100644 --- a/plugins/vpp/plugin_impl_vpp.go +++ b/plugins/vpp/plugin_impl_vpp.go @@ -422,7 +422,7 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // Interface configurator plugin.ifVppNotifChan = make(chan govppapi.Message, 100) plugin.ifConfigurator = &ifplugin.InterfaceConfigurator{} - if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, nil); err != nil { + if err := plugin.ifConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.Linux, plugin.ifVppNotifChan, plugin.ifMtu, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("ifConfigurator Initialized") @@ -447,21 +447,21 @@ func (plugin *Plugin) initIF(ctx context.Context) error { // BFD configurator plugin.bfdConfigurator = &ifplugin.BFDConfigurator{} - if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, nil); err != nil { + if err := plugin.bfdConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("bfdConfigurator Initialized") // STN configurator plugin.stnConfigurator = &ifplugin.StnConfigurator{} - if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, nil); err != nil { + if err := plugin.stnConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("stnConfigurator Initialized") // NAT configurator plugin.natConfigurator = &ifplugin.NatConfigurator{} - if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, nil); err != nil { + if err := plugin.natConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("natConfigurator Initialized") From 75bf64d69a8b89b881f0dbedeb2ee89af1c3e960 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 23 Jul 2018 10:19:14 +0200 Subject: [PATCH 032/174] simplify Signed-off-by: Vladimir Lavor --- plugins/govppmux/govpp_channel.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/govppmux/govpp_channel.go b/plugins/govppmux/govpp_channel.go index aacde6674c..bfed46541e 100644 --- a/plugins/govppmux/govpp_channel.go +++ b/plugins/govppmux/govpp_channel.go @@ -52,10 +52,9 @@ func (r *govppRequestCtx) ReceiveReply(reply govppapi.Message) error { time.Sleep(timeout) logrus.DefaultLogger().Warnf("Govppmux: retrying binary API message %v, attempt: %d", r.requestMsg.GetMessageName(), attemptIdx) - if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err == core.ErrNotConnected { - continue + if err = r.sendRequest(r.requestMsg).ReceiveReply(reply); err != core.ErrNotConnected { + return err } - return err } } From 6ea9efd6a6638370158d68acc0c659b978acbf1f Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 23 Jul 2018 15:48:40 +0200 Subject: [PATCH 033/174] move message compatibility check to handler 
init Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 42 +++++++++++++++--- plugins/vpp/aclplugin/acl_config.go | 6 +-- plugins/vpp/aclplugin/acl_config_test.go | 4 +- .../aclplugin/vppcalls/acl_vppcalls_test.go | 18 +++++--- .../vpp/aclplugin/vppcalls/api_vppcalls.go | 8 +++- .../aclplugin/vppcalls/dump_vppcalls_test.go | 40 ++++++++++++----- .../vppcalls/interfaces_vppcalls_test.go | 20 +++++---- plugins/vpp/ifplugin/afpacket_config.go | 2 +- plugins/vpp/ifplugin/afpacket_config_test.go | 18 +++++--- plugins/vpp/ifplugin/bfd_config.go | 5 +-- plugins/vpp/ifplugin/interface_config.go | 3 +- plugins/vpp/ifplugin/nat_config.go | 3 +- plugins/vpp/ifplugin/stn_config.go | 3 +- .../ifplugin/vppcalls/admin_vppcalls_test.go | 3 +- plugins/vpp/ifplugin/vppcalls/api_vppcalls.go | 44 ++++++++++++------- .../ifplugin/vppcalls/bfd_vppcalls_test.go | 3 +- .../vpp/ifplugin/vppcalls/compat_vppcalls.go | 4 ++ .../vppcalls/dump_nat_vppcalls_test.go | 3 +- .../ifplugin/vppcalls/stn_vppcalls_test.go | 3 +- plugins/vpp/ipsecplugin/ipsec_config.go | 4 +- plugins/vpp/l2plugin/bd_config.go | 4 +- .../vpp/l3plugin/vppcalls/route_vppcalls.go | 5 ++- tests/vppcallmock/vpp_ctx_mock.go | 5 +-- 23 files changed, 165 insertions(+), 85 deletions(-) diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index dc278b96f5..8fc520aaa6 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -51,7 +51,12 @@ func (plugin *Plugin) interfacesGetHandler(formatter *render.Render) http.Handle } defer ch.Close() - ifHandler := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } res, err := ifHandler.DumpInterfaces() if err != nil { plugin.Log.Errorf("Error: %v", err) @@ -257,7 +262,12 @@ func (plugin *Plugin) interfaceACLGetHandler(formatter *render.Render) http.Hand defer ch.Close() swIndex := uint32(swIndexuInt64) - aclHandler := aclcalls.NewAclVppHandler(ch, nil, nil) + aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } res, err := aclHandler.DumpInterfaceIPAcls(swIndex) if err != nil { plugin.Deps.Log.Errorf("Error: %v", err) @@ -290,7 +300,12 @@ func (plugin *Plugin) ipACLGetHandler(formatter *render.Render) http.HandlerFunc return } defer ch.Close() - aclHandler := aclcalls.NewAclVppHandler(ch, nil, nil) + aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } res, err := aclHandler.DumpIPACL(nil) if err != nil { plugin.Deps.Log.Errorf("Error: %v", err) @@ -315,7 +330,12 @@ func (plugin *Plugin) macipACLGetHandler(formatter *render.Render) http.HandlerF formatter.JSON(w, http.StatusInternalServerError, err) return } - aclHandler := aclcalls.NewAclVppHandler(ch, nil, nil) + aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } res, err := aclHandler.DumpMACIPACL(nil) if err != nil { plugin.Log.Errorf("Error: %v", err) @@ -428,7 +448,12 @@ func (plugin *Plugin) ipACLPostHandler(formatter *render.Render) 
http.HandlerFun var aclIndex struct { Idx uint32 `json:"acl_index"` } - aclHandler := aclcalls.NewAclVppHandler(ch, nil, nil) + aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } aclIndex.Idx, err = aclHandler.AddIPAcl(aclParam.Rules, aclParam.AclName) if err != nil { plugin.Deps.Log.Errorf("Error: %v", err) @@ -470,7 +495,12 @@ func (plugin *Plugin) macipACLPostHandler(formatter *render.Render) http.Handler var aclIndex struct { Idx uint32 `json:"acl_index"` } - aclHandler := aclcalls.NewAclVppHandler(ch, nil, nil) + aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } aclIndex.Idx, err = aclHandler.AddMacIPAcl(aclParam.Rules, aclParam.AclName) if err != nil { plugin.Log.Errorf("Error: %v", err) diff --git a/plugins/vpp/aclplugin/acl_config.go b/plugins/vpp/aclplugin/acl_config.go index f1040ed5bc..6738ca8682 100644 --- a/plugins/vpp/aclplugin/acl_config.go +++ b/plugins/vpp/aclplugin/acl_config.go @@ -102,11 +102,7 @@ func (plugin *ACLConfigurator) Init(logger logging.PluginLogger, goVppMux govppm } // ACL binary api handler - plugin.aclHandler = vppcalls.NewAclVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.stopwatch) - - // Message compatibility - if err = plugin.vppChan.CheckMessageCompatibility(vppcalls.AclMessages...); err != nil { - plugin.log.Error(err) + if plugin.aclHandler, err = vppcalls.NewAclVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/aclplugin/acl_config_test.go b/plugins/vpp/aclplugin/acl_config_test.go index ca3b6e9edb..cd716b5cf2 100644 --- a/plugins/vpp/aclplugin/acl_config_test.go +++ b/plugins/vpp/aclplugin/acl_config_test.go @@ -87,7 +87,7 @@ func TestAclConfiguratorInit(t *testing.T) { ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{}) // Test init - err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, true) + err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, false) Expect(err).To(BeNil()) err = plugin.Close() Expect(err).To(BeNil()) @@ -446,7 +446,7 @@ func aclTestSetup(t *testing.T, createIfs bool) (*vppcallmock.TestCtx, *core.Con // Configurator ctx.MockVpp.MockReply(&acl_api.ACLPluginGetVersionReply{1, 0}) plugin := &aclplugin.ACLConfigurator{} - err = plugin.Init(log, connection, ifIndexes, true) + err = plugin.Init(log, connection, ifIndexes, false) Expect(err).To(BeNil()) return ctx, connection, plugin diff --git a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go index 58ff58bf45..3c08fff568 100644 --- a/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go +++ b/plugins/vpp/aclplugin/vppcalls/acl_vppcalls_test.go @@ -257,7 +257,8 @@ func TestAddIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) aclIndex, err := aclHandler.AddIPAcl(acl_IPrules, "test0") Expect(err).To(BeNil()) @@ -290,7 +291,8 @@ func TestAddMacIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - 
aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) aclIndex, err := aclHandler.AddMacIPAcl(acl_MACIPrules, "test6") Expect(err).To(BeNil()) @@ -324,7 +326,8 @@ func TestDeleteIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) aclIndex, err := aclHandler.AddIPAcl(acl_IPrules, "test_del0") Expect(err).To(BeNil()) @@ -369,7 +372,8 @@ func TestDeleteMACIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) aclIndex, err := aclHandler.AddMacIPAcl(acl_MACIPrules, "test_del2") Expect(err).To(BeNil()) @@ -414,7 +418,8 @@ func TestModifyIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.ACLAddReplaceReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) aclIndex, err := aclHandler.AddIPAcl(acl_IPrules, "test_modify") Expect(err).To(BeNil()) @@ -472,7 +477,8 @@ func TestModifyMACIPAcl(t *testing.T) { defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&acl_api.MacipACLAddReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) aclIndex, err := aclHandler.AddMacIPAcl(acl_MACIPrules, "test_modify") Expect(err).To(BeNil()) diff --git a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go index 36803cb291..fcd9a228ed 100644 --- a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go @@ -92,10 +92,14 @@ type aclVppHandler struct { } // NewAclVppHandler creates new instance of acl vppcalls handler -func NewAclVppHandler(callsChan, dumpChan govppapi.Channel, stopwatch *measure.Stopwatch) *aclVppHandler { - return &aclVppHandler{ +func NewAclVppHandler(callsChan, dumpChan govppapi.Channel, stopwatch *measure.Stopwatch) (*aclVppHandler, error) { + handler := &aclVppHandler{ callsChannel: callsChan, dumpChannel: dumpChan, stopwatch: stopwatch, } + if err := handler.callsChannel.CheckMessageCompatibility(AclMessages...); err != nil { + return nil, err + } + return handler, nil } diff --git a/plugins/vpp/aclplugin/vppcalls/dump_vppcalls_test.go b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls_test.go index a14ea3b1e4..710c90f01b 100644 --- a/plugins/vpp/aclplugin/vppcalls/dump_vppcalls_test.go +++ b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls_test.go @@ -28,7 +28,10 @@ import ( // Test translation of IP rule into ACL Plugin's format func TestGetIPRuleMatch(t *testing.T) { - aclHandler := NewAclVppHandler(nil, nil, nil) + ctx := vppcallmock.SetupTestCtx(t) + defer ctx.TeardownTestCtx() + aclHandler, err := NewAclVppHandler(ctx.MockChannel, nil, nil) + Expect(err).To(BeNil()) icmpV4Rule := aclHandler.getIPRuleMatches(acl_api.ACLRule{ SrcIPAddr: []byte{10, 0, 0, 1}, @@ -78,7 +81,10 @@ func TestGetIPRuleMatch(t *testing.T) { // Test translation of MACIP rule into ACL Plugin's format func 
TestGetMACIPRuleMatches(t *testing.T) { - aclHandler := NewAclVppHandler(nil, nil, nil) + ctx := vppcallmock.SetupTestCtx(t) + defer ctx.TeardownTestCtx() + aclHandler, err := NewAclVppHandler(ctx.MockChannel, nil, nil) + Expect(err).To(BeNil()) macipV4Rule := aclHandler.getMACIPRuleMatches(acl_api.MacipACLRule{ IsPermit: 1, @@ -136,7 +142,8 @@ func TestDumpIPACL(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test", nil)) swIfIndexes.RegisterName("if0", 1, nil) @@ -182,7 +189,8 @@ func TestDumpMACIPACL(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test", nil)) swIfIndexes.RegisterName("if0", 1, nil) @@ -209,7 +217,8 @@ func TestDumpACLInterfaces(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test", nil)) swIfIndexes.RegisterName("if0", 1, nil) @@ -234,7 +243,8 @@ func TestDumpMACIPACLInterfaces(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, nil) @@ -261,7 +271,8 @@ func TestDumpIPAcls(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) IPRuleACLs, err := aclHandler.DumpIPAcls() Expect(err).To(Succeed()) @@ -280,7 +291,8 @@ func TestDumpMacIPAcls(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) MacIPRuleACLs, err := aclHandler.DumpMacIPAcls() Expect(err).To(Succeed()) @@ -308,7 +320,8 @@ func TestDumpInterfaceIPAcls(t *testing.T) { R: []acl_api.ACLRule{{IsPermit: 2}, {IsPermit: 0}}, }) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) ACLs, err := aclHandler.DumpInterfaceIPAcls(0) Expect(err).To(Succeed()) @@ -335,7 +348,8 @@ func TestDumpInterfaceMACIPAcls(t *testing.T) { R: []acl_api.MacipACLRule{{IsPermit: 2}, {IsPermit: 1}}, }) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) ACLs, err := aclHandler.DumpInterfaceMACIPAcls(0) Expect(err).To(Succeed()) @@ -346,7 +360,8 @@ func 
TestDumpInterface(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ SwIfIndex: 0, @@ -414,7 +429,8 @@ func TestDumpInterfaces(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) IPacls, MACIPacls, err := aclHandler.DumpInterfaces() Expect(err).To(BeNil()) diff --git a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go index 1d01c903f3..8eff937385 100644 --- a/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go +++ b/plugins/vpp/aclplugin/vppcalls/interfaces_vppcalls_test.go @@ -27,7 +27,8 @@ func TestRequestSetACLToInterfaces(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ 0, @@ -36,7 +37,7 @@ func TestRequestSetACLToInterfaces(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err := aclHandler.SetACLToInterfacesAsIngress(0, []uint32{0}) + err = aclHandler.SetACLToInterfacesAsIngress(0, []uint32{0}) Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -81,7 +82,8 @@ func TestRequestRemoveInterfacesFromACL(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ 0, @@ -90,7 +92,7 @@ func TestRequestRemoveInterfacesFromACL(t *testing.T) { []uint32{0, 1}, }) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceSetACLListReply{}) - err := aclHandler.RemoveIPIngressACLFromInterfaces(0, []uint32{0}) + err = aclHandler.RemoveIPIngressACLFromInterfaces(0, []uint32{0}) Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.ACLInterfaceListDetails{ @@ -135,10 +137,11 @@ func TestSetMacIPAclToInterface(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.MacipACLInterfaceAddDelReply{}) - err := aclHandler.SetMacIPAclToInterface(0, []uint32{0}) + err = aclHandler.SetMacIPAclToInterface(0, []uint32{0}) Expect(err).To(BeNil()) // error cases @@ -157,10 +160,11 @@ func TestRemoveMacIPIngressACLFromInterfaces(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) defer ctx.TeardownTestCtx() - aclHandler := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + aclHandler, err := NewAclVppHandler(ctx.MockChannel, ctx.MockChannel, nil) + Expect(err).To(BeNil()) ctx.MockVpp.MockReply(&acl_api.MacipACLInterfaceAddDelReply{}) - err := aclHandler.RemoveMacIPIngressACLFromInterfaces(1, []uint32{0}) + err = aclHandler.RemoveMacIPIngressACLFromInterfaces(1, []uint32{0}) Expect(err).To(BeNil()) // 
error cases diff --git a/plugins/vpp/ifplugin/afpacket_config.go b/plugins/vpp/ifplugin/afpacket_config.go index 52a2b106aa..e01a59bf28 100644 --- a/plugins/vpp/ifplugin/afpacket_config.go +++ b/plugins/vpp/ifplugin/afpacket_config.go @@ -35,7 +35,7 @@ type AFPacketConfigurator struct { afPacketByName map[string]*AfPacketConfig // af packet name -> Af Packet interface configuration linuxHostInterfaces map[string]struct{} // a set of available host (Linux) interfaces - ifHandler vppcalls.IfVppAPI // govpp channel used by InterfaceConfigurator + ifHandler vppcalls.IfVppAPI // handler used by InterfaceConfigurator } // AfPacketConfig wraps the proto formatted configuration of an Afpacket interface together with a flag diff --git a/plugins/vpp/ifplugin/afpacket_config_test.go b/plugins/vpp/ifplugin/afpacket_config_test.go index 00c12fe1b0..8dd07d160e 100644 --- a/plugins/vpp/ifplugin/afpacket_config_test.go +++ b/plugins/vpp/ifplugin/afpacket_config_test.go @@ -45,7 +45,8 @@ func TestAfPacketConfiguratorInit(t *testing.T) { vppCh, err := connection.NewAPIChannel() Expect(err).To(BeNil()) stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler := vppcalls.NewIfVppHandler(vppCh, logrus.DefaultLogger(), stopwatch) + ifHandler, err := vppcalls.NewIfVppHandler(vppCh, logrus.DefaultLogger(), stopwatch) + Expect(err).To(BeNil()) err = plugin.Init(logrus.DefaultLogger(), ifHandler, struct{}{}, nil) Expect(err).To(BeNil()) connection.Disconnect() @@ -412,8 +413,9 @@ func TestAfPacketNewLinuxInterfaceNoLinux(t *testing.T) { // Configurator plugin := &ifplugin.AFPacketConfigurator{} stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) - err := plugin.Init(log, ifHandler, nil, swIfIndices) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + Expect(err).To(BeNil()) + err = plugin.Init(log, ifHandler, nil, swIfIndices) Expect(err).To(BeNil()) // Test registered linux interface config := plugin.ResolveCreatedLinuxInterface("host1", "host1", 1) @@ -469,8 +471,9 @@ func TestAfPacketDeleteLinuxInterfaceNoLinux(t *testing.T) { // Configurator plugin := &ifplugin.AFPacketConfigurator{} stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) - err := plugin.Init(log, ifHandler, nil, swIfIndices) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + Expect(err).To(BeNil()) + err = plugin.Init(log, ifHandler, nil, swIfIndices) Expect(err).To(BeNil()) // Prepare plugin.ResolveCreatedLinuxInterface("host1", "host1", 1) @@ -522,8 +525,9 @@ func afPacketTestSetup(t *testing.T) (*vppcallmock.TestCtx, *ifplugin.AFPacketCo // Configurator plugin := &ifplugin.AFPacketConfigurator{} stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) - err := plugin.Init(log, ifHandler, struct{}{}, swIfIndices) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + Expect(err).To(BeNil()) + err = plugin.Init(log, ifHandler, struct{}{}, swIfIndices) Expect(err).To(BeNil()) return ctx, plugin, swIfIndices diff --git a/plugins/vpp/ifplugin/bfd_config.go b/plugins/vpp/ifplugin/bfd_config.go index 1a83350fe2..f4c7739d1f 100644 --- a/plugins/vpp/ifplugin/bfd_config.go +++ b/plugins/vpp/ifplugin/bfd_config.go @@ -81,10 +81,7 @@ func (plugin *BFDConfigurator) 
Init(logger logging.PluginLogger, goVppMux govppm } // VPP API handler - plugin.bfdHandler = vppcalls.NewBfdVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch) - - if err = plugin.bfdHandler.CheckMsgCompatibilityForBfd(); err != nil { - plugin.log.Error(err) + if plugin.bfdHandler, err = vppcalls.NewBfdVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/ifplugin/interface_config.go b/plugins/vpp/ifplugin/interface_config.go index 814d1d91af..583945d22e 100644 --- a/plugins/vpp/ifplugin/interface_config.go +++ b/plugins/vpp/ifplugin/interface_config.go @@ -97,8 +97,7 @@ func (plugin *InterfaceConfigurator) Init(logger logging.PluginLogger, goVppMux } // VPP API handler - plugin.ifHandler = vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch) - if err := plugin.ifHandler.CheckMsgCompatibilityForInterface(); err != nil { + if plugin.ifHandler, err = vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/ifplugin/nat_config.go b/plugins/vpp/ifplugin/nat_config.go index ecac65e5f0..d6a69d2090 100644 --- a/plugins/vpp/ifplugin/nat_config.go +++ b/plugins/vpp/ifplugin/nat_config.go @@ -116,8 +116,7 @@ func (plugin *NatConfigurator) Init(logger logging.PluginLogger, goVppMux govppm } // VPP API handler - plugin.natHandler = vppcalls.NewNatVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.log, plugin.stopwatch) - if err := plugin.natHandler.CheckMsgCompatibilityForNat(); err != nil { + if plugin.natHandler, err = vppcalls.NewNatVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.log, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/ifplugin/stn_config.go b/plugins/vpp/ifplugin/stn_config.go index 2577d800d3..b0a7ad424e 100644 --- a/plugins/vpp/ifplugin/stn_config.go +++ b/plugins/vpp/ifplugin/stn_config.go @@ -90,8 +90,7 @@ func (plugin *StnConfigurator) Init(logger logging.PluginLogger, goVppMux govppm plugin.allIndexesSeq, plugin.unstoredIndexSeq = 1, 1 // VPP API handler - plugin.stnHandler = vppcalls.NewStnVppHandler(plugin.vppChan, plugin.stopwatch) - if err := plugin.stnHandler.CheckMsgCompatibilityForStn(); err != nil { + if plugin.stnHandler, err = vppcalls.NewStnVppHandler(plugin.vppChan, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go index 2c2fb1ab9b..dadd14641b 100644 --- a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go @@ -176,6 +176,7 @@ func TestInterfaceRemoveTagRetval(t *testing.T) { func ifTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.IfVppAPI) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - ifHandler := vppcalls.NewIfVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + Expect(err).To(BeNil()) return ctx, ifHandler } diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go index affa45c1bb..f9af5fe9d8 100644 --- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -33,8 +33,6 @@ import ( type IfVppAPI interface { IfVppWrite IfVppRead - // CheckMsgCompatibilityForInterface checks if interface CRSs are compatible with VPP in runtime. 
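// A minimal caller-side sketch (illustrative, not a line of this patch): once the
// compatibility check lives inside the constructor, every call site moves from a
// plain assignment to the (handler, err) form used throughout the rest of this series:
//
//	ifHandler, err := vppcalls.NewIfVppHandler(vppChan, log, stopwatch)
//	if err != nil {
//		return err // the running VPP does not support a required binary API message
//	}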
- CheckMsgCompatibilityForInterface() error } // IfVppWrite provides write methods for interface plugin @@ -118,8 +116,6 @@ type IfVppRead interface { type BfdVppAPI interface { BfdVppWrite BfdVppRead - // CheckMsgCompatibilityForBfd checks if bfd CRSs are compatible with VPP in runtime. - CheckMsgCompatibilityForBfd() error } // BfdVppWrite provides write methods for BFD @@ -156,8 +152,6 @@ type BfdVppRead interface { type NatVppAPI interface { NatVppWrite NatVppRead - // CheckMsgCompatibilityForNat verifies compatibility of used binary API calls - CheckMsgCompatibilityForNat() error } // NatVppWrite provides write methods for NAT @@ -205,8 +199,6 @@ type NatVppRead interface { type StnVppAPI interface { StnVppWrite StnVppRead - // CheckMsgCompatibilityForStn verifies compatibility of used binary API calls - CheckMsgCompatibilityForStn() error } // StnVppWrite provides write methods for STN @@ -252,37 +244,57 @@ type stnVppHandler struct { } // NewIfVppHandler creates new instance of interface vppcalls handler -func NewIfVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) *ifVppHandler { - return &ifVppHandler{ +func NewIfVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*ifVppHandler, error) { + handler := &ifVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, log: log, } + if err := handler.CheckMsgCompatibilityForInterface(); err != nil { + return nil, err + } + + return handler, nil } // NewBfdVppHandler creates new instance of BFD vppcalls handler -func NewBfdVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) *bfdVppHandler { - return &bfdVppHandler{ +func NewBfdVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*bfdVppHandler, error) { + handler := &bfdVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, log: log, } + if err := handler.CheckMsgCompatibilityForBfd(); err != nil { + return nil, err + } + + return handler, nil } // NewNatVppHandler creates new instance of NAT vppcalls handler -func NewNatVppHandler(callsChan, dumpChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) *natVppHandler { - return &natVppHandler{ +func NewNatVppHandler(callsChan, dumpChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*natVppHandler, error) { + handler := &natVppHandler{ callsChannel: callsChan, dumpChannel: dumpChan, stopwatch: stopwatch, log: log, } + if err := handler.CheckMsgCompatibilityForNat(); err != nil { + return nil, err + } + + return handler, nil } // NewStnVppHandler creates new instance of STN vppcalls handler -func NewStnVppHandler(callsChan api.Channel, stopwatch *measure.Stopwatch) *stnVppHandler { - return &stnVppHandler{ +func NewStnVppHandler(callsChan api.Channel, stopwatch *measure.Stopwatch) (*stnVppHandler, error) { + handler := &stnVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, } + if err := handler.CheckMsgCompatibilityForStn(); err != nil { + return nil, err + } + + return handler, nil } diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go index 07db44cac1..ad9dc00d60 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go @@ -917,6 +917,7 @@ func TestDeleteBfdEchoFunctionRetval(t *testing.T) { func bfdTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.BfdVppAPI) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - 
bfdHandler := vppcalls.NewBfdVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + bfdHandler, err := vppcalls.NewBfdVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + Expect(err).To(BeNil()) return ctx, bfdHandler } diff --git a/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go index 147bbd6cce..a35397e8dc 100644 --- a/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go @@ -28,6 +28,7 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/vxlan" ) +// CheckMsgCompatibilityForInterface checks if interface CRSs are compatible with VPP in runtime. func (handler *ifVppHandler) CheckMsgCompatibilityForInterface() error { msgs := []govppapi.Message{ &memif.MemifCreate{}, @@ -98,6 +99,7 @@ func (handler *ifVppHandler) CheckMsgCompatibilityForInterface() error { return handler.callsChannel.CheckMessageCompatibility(msgs...) } +// CheckMsgCompatibilityForBfd checks if bfd CRSs are compatible with VPP in runtime. func (handler *bfdVppHandler) CheckMsgCompatibilityForBfd() error { msgs := []govppapi.Message{ &bfd.BfdUDPAdd{}, @@ -114,6 +116,7 @@ func (handler *bfdVppHandler) CheckMsgCompatibilityForBfd() error { return handler.callsChannel.CheckMessageCompatibility(msgs...) } +// CheckMsgCompatibilityForNat verifies compatibility of used binary API calls func (handler *natVppHandler) CheckMsgCompatibilityForNat() error { msgs := []govppapi.Message{ &nat.Nat44AddDelAddressRange{}, @@ -130,6 +133,7 @@ func (handler *natVppHandler) CheckMsgCompatibilityForNat() error { return handler.callsChannel.CheckMessageCompatibility(msgs...) } +// CheckMsgCompatibilityForStn verifies compatibility of used binary API calls func (handler *stnVppHandler) CheckMsgCompatibilityForStn() error { msgs := []govppapi.Message{ &stn.StnAddDelRule{}, diff --git a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go index c3ec6adfb1..c7d76bb770 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go @@ -90,6 +90,7 @@ func TestNat44InterfaceDump3(t *testing.T) { func natTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.NatVppAPI) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - natHandler := vppcalls.NewNatVppHandler(ctx.MockChannel, ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + natHandler, err := vppcalls.NewNatVppHandler(ctx.MockChannel, ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + Expect(err).To(BeNil()) return ctx, natHandler } diff --git a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go index 599ec85a58..c19d01e4f3 100644 --- a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go @@ -118,6 +118,7 @@ func TestDelStnRule(t *testing.T) { func stnTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.StnVppAPI) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - stnHandler := vppcalls.NewStnVppHandler(ctx.MockChannel, measure.NewStopwatch("test-stopwatch", log)) + stnHandler, err := vppcalls.NewStnVppHandler(ctx.MockChannel, measure.NewStopwatch("test-stopwatch", log)) + Expect(err).To(BeNil()) return ctx, stnHandler } diff --git a/plugins/vpp/ipsecplugin/ipsec_config.go b/plugins/vpp/ipsecplugin/ipsec_config.go index 
ad4adaaffd..d3eb03566d 100644 --- a/plugins/vpp/ipsecplugin/ipsec_config.go +++ b/plugins/vpp/ipsecplugin/ipsec_config.go @@ -95,7 +95,9 @@ func (plugin *IPSecConfigurator) Init(logger logging.PluginLogger, goVppMux govp } // VPP API handlers - plugin.ifHandler = iface_vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch) + if plugin.ifHandler, err = iface_vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch); err != nil { + return err + } // Message compatibility if err = plugin.vppCh.CheckMessageCompatibility(vppcalls.IPSecMessages...); err != nil { diff --git a/plugins/vpp/l2plugin/bd_config.go b/plugins/vpp/l2plugin/bd_config.go index d908c45cd6..fd74aa2833 100644 --- a/plugins/vpp/l2plugin/bd_config.go +++ b/plugins/vpp/l2plugin/bd_config.go @@ -99,7 +99,9 @@ func (plugin *BDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmu } // VPP API handlers - plugin.ifHandler = ifvppcalls.NewIfVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch) + if plugin.ifHandler, err = ifvppcalls.NewIfVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + return err + } // Message compatibility err = plugin.vppChan.CheckMessageCompatibility(vppcalls.BridgeDomainMessages...) diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go index 57300fb08d..d6148229f6 100644 --- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go @@ -136,7 +136,10 @@ func vppAddDelRoute(route *Route, vppChan govppapi.Channel, delete bool, stopwat // VppAddRoute adds new route, according to provided input. Every route has to contain VRF ID (default is 0). func VppAddRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - ifHandler := ifvppcalls.NewIfVppHandler(vppChan, logrus.DefaultLogger(), stopwatch) // TODO temp change + ifHandler, err := ifvppcalls.NewIfVppHandler(vppChan, logrus.DefaultLogger(), stopwatch) // TODO temp change + if err != nil { + return err + } if err := ifHandler.CreateVrfIfNeeded(route.VrfID); err != nil { return err } diff --git a/tests/vppcallmock/vpp_ctx_mock.go b/tests/vppcallmock/vpp_ctx_mock.go index 45f880fe8b..a9948f2b64 100644 --- a/tests/vppcallmock/vpp_ctx_mock.go +++ b/tests/vppcallmock/vpp_ctx_mock.go @@ -84,10 +84,9 @@ func (m *mockedChannel) SendMultiRequest(msg govppapi.Message) govppapi.MultiReq return m.channel.SendMultiRequest(msg) } -// CheckMessageCompatibility checks whether provided messages are compatible with the version of VPP -// which the library is connected to +// CheckMessageCompatibility does nothing for mocked channel func (m *mockedChannel) CheckMessageCompatibility(msgs ...govppapi.Message) error { - return m.channel.CheckMessageCompatibility(msgs...) 
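// Note on this hunk (illustrative, not a line of this patch): the mocked channel has
// to short-circuit the check, because every handler constructor now calls
// CheckMessageCompatibility internally and would otherwise fail against the mock VPP
// adapter instead of a live VPP:
//
//	ctx := vppcallmock.SetupTestCtx(t)
//	stnHandler, err := vppcalls.NewStnVppHandler(ctx.MockChannel, nil)
//	// err is nil only because the mock's compatibility check is a no-op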
+ return nil } // SubscribeNotification subscribes for receiving of the specified notification messages via provided Go channel From c2f90962e71b528a29badfbb5f0316b7c2af9f37 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 23 Jul 2018 16:17:21 +0200 Subject: [PATCH 034/174] stopwatch removed from tests + changed message compatibility check Signed-off-by: Vladimir Lavor --- plugins/vpp/ifplugin/afpacket_config_test.go | 13 +- plugins/vpp/ifplugin/bfd_config_test.go | 1 - .../ifplugin/vppcalls/admin_vppcalls_test.go | 3 +- plugins/vpp/ifplugin/vppcalls/api_vppcalls.go | 8 +- .../ifplugin/vppcalls/bfd_vppcalls_test.go | 3 +- .../vpp/ifplugin/vppcalls/compat_vppcalls.go | 186 ++++++++---------- .../vppcalls/dump_nat_vppcalls_test.go | 3 +- .../ifplugin/vppcalls/stn_vppcalls_test.go | 5 +- 8 files changed, 99 insertions(+), 123 deletions(-) diff --git a/plugins/vpp/ifplugin/afpacket_config_test.go b/plugins/vpp/ifplugin/afpacket_config_test.go index 8dd07d160e..7eff10f02f 100644 --- a/plugins/vpp/ifplugin/afpacket_config_test.go +++ b/plugins/vpp/ifplugin/afpacket_config_test.go @@ -21,7 +21,6 @@ import ( "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/idxvpp/nametoidx" ap_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/af_packet" @@ -44,8 +43,7 @@ func TestAfPacketConfiguratorInit(t *testing.T) { plugin := &ifplugin.AFPacketConfigurator{} vppCh, err := connection.NewAPIChannel() Expect(err).To(BeNil()) - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler, err := vppcalls.NewIfVppHandler(vppCh, logrus.DefaultLogger(), stopwatch) + ifHandler, err := vppcalls.NewIfVppHandler(vppCh, logrus.DefaultLogger(), nil) Expect(err).To(BeNil()) err = plugin.Init(logrus.DefaultLogger(), ifHandler, struct{}{}, nil) Expect(err).To(BeNil()) @@ -412,8 +410,7 @@ func TestAfPacketNewLinuxInterfaceNoLinux(t *testing.T) { swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "afpacket", nil)) // Configurator plugin := &ifplugin.AFPacketConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, nil) Expect(err).To(BeNil()) err = plugin.Init(log, ifHandler, nil, swIfIndices) Expect(err).To(BeNil()) @@ -470,8 +467,7 @@ func TestAfPacketDeleteLinuxInterfaceNoLinux(t *testing.T) { swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "afpacket", nil)) // Configurator plugin := &ifplugin.AFPacketConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, nil) Expect(err).To(BeNil()) err = plugin.Init(log, ifHandler, nil, swIfIndices) Expect(err).To(BeNil()) @@ -524,8 +520,7 @@ func afPacketTestSetup(t *testing.T) (*vppcallmock.TestCtx, *ifplugin.AFPacketCo swIfIndices := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "afpacket", nil)) // Configurator plugin := &ifplugin.AFPacketConfigurator{} - stopwatch := measure.NewStopwatch("test-stopwatch", logrus.DefaultLogger()) - ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, stopwatch) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, nil) 
Expect(err).To(BeNil()) err = plugin.Init(log, ifHandler, struct{}{}, swIfIndices) Expect(err).To(BeNil()) diff --git a/plugins/vpp/ifplugin/bfd_config_test.go b/plugins/vpp/ifplugin/bfd_config_test.go index 3d1372e74a..b0280ffcfc 100644 --- a/plugins/vpp/ifplugin/bfd_config_test.go +++ b/plugins/vpp/ifplugin/bfd_config_test.go @@ -41,7 +41,6 @@ func TestBfdConfiguratorInit(t *testing.T) { RegisterTestingT(t) connection, _ := core.Connect(&mock.VppAdapter{}) defer connection.Disconnect() - plugin := &ifplugin.BFDConfigurator{} err := plugin.Init(logging.ForPlugin("test-log", logrus.NewLogRegistry()), connection, nil, true) diff --git a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go index dadd14641b..9da20dccbb 100644 --- a/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/admin_vppcalls_test.go @@ -18,7 +18,6 @@ import ( "testing" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" @@ -176,7 +175,7 @@ func TestInterfaceRemoveTagRetval(t *testing.T) { func ifTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.IfVppAPI) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + ifHandler, err := vppcalls.NewIfVppHandler(ctx.MockChannel, log, nil) Expect(err).To(BeNil()) return ctx, ifHandler } diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go index f9af5fe9d8..8b73ce656a 100644 --- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -250,7 +250,7 @@ func NewIfVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measu stopwatch: stopwatch, log: log, } - if err := handler.CheckMsgCompatibilityForInterface(); err != nil { + if err := handler.callsChannel.CheckMessageCompatibility(InterfaceMessages...); err != nil { return nil, err } @@ -264,7 +264,7 @@ func NewBfdVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *meas stopwatch: stopwatch, log: log, } - if err := handler.CheckMsgCompatibilityForBfd(); err != nil { + if err := handler.callsChannel.CheckMessageCompatibility(BfdMessages...); err != nil { return nil, err } @@ -279,7 +279,7 @@ func NewNatVppHandler(callsChan, dumpChan api.Channel, log logging.Logger, stopw stopwatch: stopwatch, log: log, } - if err := handler.CheckMsgCompatibilityForNat(); err != nil { + if err := handler.callsChannel.CheckMessageCompatibility(NatMessages...); err != nil { return nil, err } @@ -292,7 +292,7 @@ func NewStnVppHandler(callsChan api.Channel, stopwatch *measure.Stopwatch) (*stn callsChannel: callsChan, stopwatch: stopwatch, } - if err := handler.CheckMsgCompatibilityForStn(); err != nil { + if err := handler.callsChannel.CheckMessageCompatibility(StnMessages...); err != nil { return nil, err } diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go index ad9dc00d60..9b891a9ae2 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go @@ -19,7 +19,6 @@ import ( "testing" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" 
"github.com/ligato/vpp-agent/idxvpp/nametoidx" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" @@ -917,7 +916,7 @@ func TestDeleteBfdEchoFunctionRetval(t *testing.T) { func bfdTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.BfdVppAPI) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - bfdHandler, err := vppcalls.NewBfdVppHandler(ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + bfdHandler, err := vppcalls.NewBfdVppHandler(ctx.MockChannel, log, nil) Expect(err).To(BeNil()) return ctx, bfdHandler } diff --git a/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go index a35397e8dc..ca7f099c1f 100644 --- a/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/compat_vppcalls.go @@ -28,116 +28,104 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/binapi/vxlan" ) -// CheckMsgCompatibilityForInterface checks if interface CRSs are compatible with VPP in runtime. -func (handler *ifVppHandler) CheckMsgCompatibilityForInterface() error { - msgs := []govppapi.Message{ - &memif.MemifCreate{}, - &memif.MemifCreateReply{}, - &memif.MemifDelete{}, - &memif.MemifDeleteReply{}, - &memif.MemifDump{}, - &memif.MemifDetails{}, - &memif.MemifSocketFilenameDump{}, - &memif.MemifSocketFilenameDetails{}, +// InterfaceMessages checks if interface CRSs are compatible with VPP in runtime. +var InterfaceMessages = []govppapi.Message{ + &memif.MemifCreate{}, + &memif.MemifCreateReply{}, + &memif.MemifDelete{}, + &memif.MemifDeleteReply{}, + &memif.MemifDump{}, + &memif.MemifDetails{}, + &memif.MemifSocketFilenameDump{}, + &memif.MemifSocketFilenameDetails{}, - &interfaces.CreateLoopback{}, - &interfaces.CreateLoopbackReply{}, + &interfaces.CreateLoopback{}, + &interfaces.CreateLoopbackReply{}, - &vxlan.VxlanAddDelTunnel{}, - &vxlan.VxlanAddDelTunnelReply{}, - &vxlan.VxlanTunnelDump{}, - &vxlan.VxlanTunnelDetails{}, + &vxlan.VxlanAddDelTunnel{}, + &vxlan.VxlanAddDelTunnelReply{}, + &vxlan.VxlanTunnelDump{}, + &vxlan.VxlanTunnelDetails{}, - &af_packet.AfPacketCreate{}, - &af_packet.AfPacketCreateReply{}, - &af_packet.AfPacketDelete{}, - &af_packet.AfPacketDeleteReply{}, + &af_packet.AfPacketCreate{}, + &af_packet.AfPacketCreateReply{}, + &af_packet.AfPacketDelete{}, + &af_packet.AfPacketDeleteReply{}, - &tap.TapConnect{}, - &tap.TapConnectReply{}, - &tap.TapDelete{}, - &tap.TapDeleteReply{}, - &tap.SwInterfaceTapDump{}, - &tap.SwInterfaceTapDetails{}, + &tap.TapConnect{}, + &tap.TapConnectReply{}, + &tap.TapDelete{}, + &tap.TapDeleteReply{}, + &tap.SwInterfaceTapDump{}, + &tap.SwInterfaceTapDetails{}, - &tapv2.TapCreateV2{}, - &tapv2.TapCreateV2Reply{}, - &tapv2.TapDeleteV2{}, - &tapv2.TapDeleteV2Reply{}, + &tapv2.TapCreateV2{}, + &tapv2.TapCreateV2Reply{}, + &tapv2.TapDeleteV2{}, + &tapv2.TapDeleteV2Reply{}, - &interfaces.SwInterfaceDump{}, - &interfaces.SwInterfaceDetails{}, - &interfaces.SwInterfaceEvent{}, - &interfaces.SwInterfaceSetFlags{}, - &interfaces.SwInterfaceSetFlagsReply{}, - &interfaces.SwInterfaceAddDelAddress{}, - &interfaces.SwInterfaceAddDelAddressReply{}, - &interfaces.SwInterfaceSetMacAddress{}, - &interfaces.SwInterfaceSetMacAddressReply{}, - &interfaces.SwInterfaceSetTable{}, - &interfaces.SwInterfaceSetTableReply{}, - &interfaces.SwInterfaceGetTable{}, - &interfaces.SwInterfaceGetTableReply{}, - &interfaces.SwInterfaceSetUnnumbered{}, - &interfaces.SwInterfaceSetUnnumberedReply{}, - 
&interfaces.SwInterfaceTagAddDel{}, - &interfaces.SwInterfaceTagAddDelReply{}, - &interfaces.SwInterfaceSetMtu{}, - &interfaces.SwInterfaceSetMtuReply{}, - &interfaces.HwInterfaceSetMtu{}, - &interfaces.HwInterfaceSetMtuReply{}, + &interfaces.SwInterfaceDump{}, + &interfaces.SwInterfaceDetails{}, + &interfaces.SwInterfaceEvent{}, + &interfaces.SwInterfaceSetFlags{}, + &interfaces.SwInterfaceSetFlagsReply{}, + &interfaces.SwInterfaceAddDelAddress{}, + &interfaces.SwInterfaceAddDelAddressReply{}, + &interfaces.SwInterfaceSetMacAddress{}, + &interfaces.SwInterfaceSetMacAddressReply{}, + &interfaces.SwInterfaceSetTable{}, + &interfaces.SwInterfaceSetTableReply{}, + &interfaces.SwInterfaceGetTable{}, + &interfaces.SwInterfaceGetTableReply{}, + &interfaces.SwInterfaceSetUnnumbered{}, + &interfaces.SwInterfaceSetUnnumberedReply{}, + &interfaces.SwInterfaceTagAddDel{}, + &interfaces.SwInterfaceTagAddDelReply{}, + &interfaces.SwInterfaceSetMtu{}, + &interfaces.SwInterfaceSetMtuReply{}, + &interfaces.HwInterfaceSetMtu{}, + &interfaces.HwInterfaceSetMtuReply{}, - &ip.IPAddressDump{}, - &ip.IPAddressDetails{}, - &ip.IPFibDump{}, - &ip.IPFibDetails{}, - &ip.IPTableAddDel{}, - &ip.IPTableAddDelReply{}, - &ip.IPContainerProxyAddDel{}, - &ip.IPContainerProxyAddDelReply{}, - } - return handler.callsChannel.CheckMessageCompatibility(msgs...) + &ip.IPAddressDump{}, + &ip.IPAddressDetails{}, + &ip.IPFibDump{}, + &ip.IPFibDetails{}, + &ip.IPTableAddDel{}, + &ip.IPTableAddDelReply{}, + &ip.IPContainerProxyAddDel{}, + &ip.IPContainerProxyAddDelReply{}, } -// CheckMsgCompatibilityForBfd checks if bfd CRSs are compatible with VPP in runtime. -func (handler *bfdVppHandler) CheckMsgCompatibilityForBfd() error { - msgs := []govppapi.Message{ - &bfd.BfdUDPAdd{}, - &bfd.BfdUDPAddReply{}, - &bfd.BfdUDPMod{}, - &bfd.BfdUDPModReply{}, - &bfd.BfdUDPDel{}, - &bfd.BfdUDPDelReply{}, - &bfd.BfdAuthSetKey{}, - &bfd.BfdAuthSetKeyReply{}, - &bfd.BfdAuthDelKey{}, - &bfd.BfdAuthDelKeyReply{}, - } - return handler.callsChannel.CheckMessageCompatibility(msgs...) +// BfdMessages is the list of BFD binary API messages checked for compatibility with the running VPP +var BfdMessages = []govppapi.Message{ + &bfd.BfdUDPAdd{}, + &bfd.BfdUDPAddReply{}, + &bfd.BfdUDPMod{}, + &bfd.BfdUDPModReply{}, + &bfd.BfdUDPDel{}, + &bfd.BfdUDPDelReply{}, + &bfd.BfdAuthSetKey{}, + &bfd.BfdAuthSetKeyReply{}, + &bfd.BfdAuthDelKey{}, + &bfd.BfdAuthDelKeyReply{}, } -// CheckMsgCompatibilityForNat verifies compatibility of used binary API calls -func (handler *natVppHandler) CheckMsgCompatibilityForNat() error { - msgs := []govppapi.Message{ - &nat.Nat44AddDelAddressRange{}, - &nat.Nat44AddDelAddressRangeReply{}, - &nat.Nat44ForwardingEnableDisable{}, - &nat.Nat44ForwardingEnableDisableReply{}, - &nat.Nat44InterfaceAddDelFeature{}, - &nat.Nat44InterfaceAddDelFeatureReply{}, - &nat.Nat44AddDelStaticMapping{}, - &nat.Nat44AddDelStaticMappingReply{}, - &nat.Nat44AddDelLbStaticMapping{}, - &nat.Nat44AddDelLbStaticMappingReply{}, - } - return handler.callsChannel.CheckMessageCompatibility(msgs...) 
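// A minimal sketch (illustrative, not a line of this patch): exporting the message
// lists keeps the old check available to any holder of a channel, mirroring what the
// constructors now do internally (handler.callsChannel.CheckMessageCompatibility(InterfaceMessages...)):
//
//	if err := ch.CheckMessageCompatibility(vppcalls.BfdMessages...); err != nil {
//		return err // BFD binary API incompatible with the running VPP
//	}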
+// NatMessages is the list of NAT binary API messages checked for compatibility with the running VPP +var NatMessages = []govppapi.Message{ + &nat.Nat44AddDelAddressRange{}, + &nat.Nat44AddDelAddressRangeReply{}, + &nat.Nat44ForwardingEnableDisable{}, + &nat.Nat44ForwardingEnableDisableReply{}, + &nat.Nat44InterfaceAddDelFeature{}, + &nat.Nat44InterfaceAddDelFeatureReply{}, + &nat.Nat44AddDelStaticMapping{}, + &nat.Nat44AddDelStaticMappingReply{}, + &nat.Nat44AddDelLbStaticMapping{}, + &nat.Nat44AddDelLbStaticMappingReply{}, } -// CheckMsgCompatibilityForStn verifies compatibility of used binary API calls -func (handler *stnVppHandler) CheckMsgCompatibilityForStn() error { - msgs := []govppapi.Message{ - &stn.StnAddDelRule{}, - &stn.StnAddDelRuleReply{}, - } - return handler.callsChannel.CheckMessageCompatibility(msgs...) +// StnMessages is the list of STN binary API messages checked for compatibility with the running VPP +var StnMessages = []govppapi.Message{ + &stn.StnAddDelRule{}, + &stn.StnAddDelRuleReply{}, } diff --git a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go index c7d76bb770..62635b2048 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go @@ -24,7 +24,6 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/tests/vppcallmock" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" . "github.com/onsi/gomega" ) @@ -90,7 +89,7 @@ func TestNat44InterfaceDump3(t *testing.T) { func natTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.NatVppAPI) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - natHandler, err := vppcalls.NewNatVppHandler(ctx.MockChannel, ctx.MockChannel, log, measure.NewStopwatch("test-stopwatch", log)) + natHandler, err := vppcalls.NewNatVppHandler(ctx.MockChannel, ctx.MockChannel, log, nil) Expect(err).To(BeNil()) return ctx, natHandler } diff --git a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go index c19d01e4f3..b9aaa3eca5 100644 --- a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go @@ -18,8 +18,6 @@ import ( "net" "testing" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" @@ -117,8 +115,7 @@ func TestDelStnRule(t *testing.T) { func stnTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.StnVppAPI) { ctx := vppcallmock.SetupTestCtx(t) - log := logrus.NewLogger("test-log") - stnHandler, err := vppcalls.NewStnVppHandler(ctx.MockChannel, measure.NewStopwatch("test-stopwatch", log)) + stnHandler, err := vppcalls.NewStnVppHandler(ctx.MockChannel, nil) Expect(err).To(BeNil()) return ctx, stnHandler } From ee08981e6d0d0fdae74e82c1b5ee280a97ce2943 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 9 Jul 2018 10:09:57 +0200 Subject: [PATCH 035/174] l2plugin vppcalls api Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 34 +++- plugins/vpp/l2plugin/bd_config.go | 40 ++-- plugins/vpp/l2plugin/bd_config_test.go | 3 +- plugins/vpp/l2plugin/data_resync.go | 17 +- plugins/vpp/l2plugin/fib_config.go | 34 ++-- plugins/vpp/l2plugin/vppcalls/api_vppcalls.go | 175 ++++++++++++++++++ .../l2plugin/vppcalls/arp_term_vppcalls.go | 28 ++- 
.../vppcalls/arp_term_vppcalls_test.go | 54 ++---- .../vppcalls/bridge_domain_vppcalls.go | 15 +- .../vppcalls/bridge_domain_vppcalls_test.go | 29 ++- plugins/vpp/l2plugin/vppcalls/doc.go | 2 +- .../{vppdump => vppcalls}/dump_vppcalls.go | 41 ++-- .../dump_vppcalls_test.go | 38 ++-- .../l2plugin/vppcalls/interface_vppcalls.go | 47 ++--- .../vppcalls/interface_vppcalls_test.go | 63 +++---- .../vpp/l2plugin/vppcalls/l2fib_vppcalls.go | 52 ++---- .../l2plugin/vppcalls/l2fib_vppcalls_test.go | 51 ++--- .../l2plugin/vppcalls/xconnect_vppcalls.go | 17 +- .../vppcalls/xconnect_vppcalls_test.go | 24 ++- plugins/vpp/l2plugin/vppdump/doc.go | 3 - plugins/vpp/l2plugin/xconnect_config.go | 20 +- plugins/vpp/plugin_impl_vpp.go | 3 +- 22 files changed, 464 insertions(+), 326 deletions(-) create mode 100644 plugins/vpp/l2plugin/vppcalls/api_vppcalls.go rename plugins/vpp/l2plugin/{vppdump => vppcalls}/dump_vppcalls.go (72%) rename plugins/vpp/l2plugin/{vppdump => vppcalls}/dump_vppcalls_test.go (87%) delete mode 100644 plugins/vpp/l2plugin/vppdump/doc.go diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 8fc520aaa6..0d3823bbed 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -31,7 +31,7 @@ import ( aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppdump" + l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppdump" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) @@ -84,7 +84,13 @@ func (plugin *Plugin) bridgeDomainIdsGetHandler(formatter *render.Render) http.H } defer ch.Close() - res, err := l2plugin.DumpBridgeDomainIDs(ch, nil) + bdHandler, err := l2plugin.NewBridgeDomainVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := bdHandler.DumpBridgeDomainIDs() if err != nil { plugin.Log.Errorf("Error: %v", err) formatter.JSON(w, http.StatusInternalServerError, err) @@ -111,7 +117,13 @@ func (plugin *Plugin) bridgeDomainsGetHandler(formatter *render.Render) http.Han } defer ch.Close() - res, err := l2plugin.DumpBridgeDomains(ch, nil) + bdHandler, err := l2plugin.NewBridgeDomainVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := bdHandler.DumpBridgeDomains() if err != nil { plugin.Log.Errorf("Error: %v", err) formatter.JSON(w, http.StatusInternalServerError, nil) @@ -138,7 +150,13 @@ func (plugin *Plugin) fibTableEntriesGetHandler(formatter *render.Render) http.H } defer ch.Close() - res, err := l2plugin.DumpFIBTableEntries(ch, nil) + fibHandler, err := l2plugin.NewFibVppHandler(ch, nil, make(chan *l2plugin.FibLogicalReq), plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := fibHandler.DumpFIBTableEntries() if err != nil { plugin.Log.Errorf("Error: %v", err) formatter.JSON(w, http.StatusInternalServerError, nil) @@ -165,7 +183,13 @@ func (plugin *Plugin) xconnectPairsGetHandler(formatter *render.Render) http.Han } defer ch.Close() - res, err := l2plugin.DumpXConnectPairs(ch, nil) + xcHandler, err := 
l2plugin.NewXConnectVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := xcHandler.DumpXConnectPairs() if err != nil { plugin.Log.Errorf("Error: %v", err) formatter.JSON(w, http.StatusInternalServerError, nil) diff --git a/plugins/vpp/l2plugin/bd_config.go b/plugins/vpp/l2plugin/bd_config.go index fd74aa2833..1b37b40405 100644 --- a/plugins/vpp/l2plugin/bd_config.go +++ b/plugins/vpp/l2plugin/bd_config.go @@ -50,6 +50,9 @@ type BDConfigurator struct { // VPP channel vppChan govppapi.Channel + // VPP API handlers + bdHandler vppcalls.BridgeDomainVppAPI + // State notification channel notificationChan chan BridgeDomainStateMessage // Injected, do not close here @@ -73,7 +76,8 @@ func (plugin *BDConfigurator) GetBdIndexes() l2idx.BDIndexRW { } // Init members (channels...) and start go routines. -func (plugin *BDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, notificationChannel chan BridgeDomainStateMessage, enableStopwatch bool) (err error) { +func (plugin *BDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, + notificationChannel chan BridgeDomainStateMessage, enableStopwatch bool) (err error) { // Logger plugin.log = logger.NewLogger("-l2-bd-conf") plugin.log.Debug("Initializing L2 Bridge domains configurator") @@ -95,7 +99,7 @@ func (plugin *BDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmu // Stopwatch if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("ACLConfigurator", plugin.log) + plugin.stopwatch = measure.NewStopwatch("BDConfigurator", plugin.log) } // VPP API handlers @@ -103,9 +107,7 @@ func (plugin *BDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmu return err } - // Message compatibility - err = plugin.vppChan.CheckMessageCompatibility(vppcalls.BridgeDomainMessages...) - if err != nil { + if plugin.bdHandler, err = vppcalls.NewBridgeDomainVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { return err } @@ -137,14 +139,13 @@ func (plugin *BDConfigurator) ConfigureBridgeDomain(bdConfig *l2.BridgeDomains_B plugin.bdIDSeq++ // Create bridge domain with respective index. - if err := vppcalls.VppAddBridgeDomain(bdIdx, bdConfig, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.bdHandler.VppAddBridgeDomain(bdIdx, bdConfig); err != nil { plugin.log.Errorf("adding bridge domain %v failed: %v", bdConfig.Name, err) return err } // Find all interfaces belonging to this bridge domain and set them up. 
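Each REST handler in this commit now follows the same per-request pattern: open a channel, build a vppcalls handler on top of it, dump, and let both go away with the request. A condensed sketch of that pattern follows, assuming the l2plugin alias for the l2plugin vppcalls package used in rest_handlers.go; newAPIChannel is a hypothetical stand-in for the plugin's channel factory (plugin.GoVppmux.NewAPIChannel), and plain net/http error handling replaces the render formatter.

package example

import (
	"encoding/json"
	"net/http"

	govppapi "git.fd.io/govpp.git/api"
	"github.com/ligato/cn-infra/logging"
	l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls"
)

// xconnectGetHandler shows the request-scoped lifecycle: the channel and the
// handler are created per request and released when the request completes.
func xconnectGetHandler(log logging.Logger, newAPIChannel func() (govppapi.Channel, error)) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		ch, err := newAPIChannel()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer ch.Close()

		// The constructor also verifies binary API compatibility, so a VPP
		// version mismatch surfaces here rather than in the dump call.
		xcHandler, err := l2plugin.NewXConnectVppHandler(ch, log, nil) // nil stopwatch: timing disabled
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		res, err := xcHandler.DumpXConnectPairs()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(res)
	}
}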
- configuredIfs, err := vppcalls.SetInterfacesToBridgeDomain(bdConfig.Name, bdIdx, bdConfig.Interfaces, plugin.ifIndexes, plugin.log, - plugin.vppChan, plugin.stopwatch) + configuredIfs, err := plugin.bdHandler.SetInterfacesToBridgeDomain(bdConfig.Name, bdIdx, bdConfig.Interfaces, plugin.ifIndexes) if err != nil { return err } @@ -154,8 +155,7 @@ func (plugin *BDConfigurator) ConfigureBridgeDomain(bdConfig *l2.BridgeDomains_B if arpTerminationTable != nil && len(arpTerminationTable) != 0 { arpTable := bdConfig.ArpTerminationTable for _, arpEntry := range arpTable { - err := vppcalls.VppAddArpTerminationTableEntry(bdIdx, arpEntry.PhysAddress, arpEntry.IpAddress, - plugin.log, plugin.vppChan, plugin.stopwatch) + err := plugin.bdHandler.VppAddArpTerminationTableEntry(bdIdx, arpEntry.PhysAddress, arpEntry.IpAddress) if err != nil { plugin.log.Error(err) } @@ -216,13 +216,11 @@ func (plugin *BDConfigurator) ModifyBridgeDomain(newBdConfig *l2.BridgeDomains_B // Update interfaces. toSet, toUnset := plugin.calculateIfaceDiff(newBdConfig.Interfaces, oldBdConfig.Interfaces) - unConfIfs, err := vppcalls.UnsetInterfacesFromBridgeDomain(newBdConfig.Name, bdIdx, toUnset, plugin.ifIndexes, plugin.log, - plugin.vppChan, plugin.stopwatch) + unConfIfs, err := plugin.bdHandler.UnsetInterfacesFromBridgeDomain(newBdConfig.Name, bdIdx, toUnset, plugin.ifIndexes) if err != nil { return err } - newConfIfs, err := vppcalls.SetInterfacesToBridgeDomain(newBdConfig.Name, bdIdx, toSet, plugin.ifIndexes, plugin.log, - plugin.vppChan, plugin.stopwatch) + newConfIfs, err := plugin.bdHandler.SetInterfacesToBridgeDomain(newBdConfig.Name, bdIdx, toSet, plugin.ifIndexes) if err != nil { return err } @@ -232,12 +230,10 @@ func (plugin *BDConfigurator) ModifyBridgeDomain(newBdConfig *l2.BridgeDomains_B // Update ARP termination table. toAdd, toRemove := plugin.calculateARPDiff(newBdConfig.ArpTerminationTable, oldBdConfig.ArpTerminationTable) for _, entry := range toAdd { - vppcalls.VppAddArpTerminationTableEntry(bdIdx, entry.PhysAddress, entry.IpAddress, - plugin.log, plugin.vppChan, plugin.stopwatch) + plugin.bdHandler.VppAddArpTerminationTableEntry(bdIdx, entry.PhysAddress, entry.IpAddress) } for _, entry := range toRemove { - vppcalls.VppRemoveArpTerminationTableEntry(bdIdx, entry.PhysAddress, entry.IpAddress, - plugin.log, plugin.vppChan, plugin.stopwatch) + plugin.bdHandler.VppRemoveArpTerminationTableEntry(bdIdx, entry.PhysAddress, entry.IpAddress) } // Push change to bridge domain state. @@ -270,12 +266,11 @@ func (plugin *BDConfigurator) DeleteBridgeDomain(bdConfig *l2.BridgeDomains_Brid func (plugin *BDConfigurator) deleteBridgeDomain(bdConfig *l2.BridgeDomains_BridgeDomain, bdIdx uint32) error { // Unmap all interfaces from removed bridge domain. 
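ModifyBridgeDomain above leans on plugin.calculateIfaceDiff, which this patch references but does not show. Purely for illustration, here is a plausible shape of that helper, under the assumption that interfaces are matched by name; the real implementation may additionally re-set interfaces whose attributes changed.

package example

import (
	"github.com/ligato/vpp-agent/plugins/vpp/model/l2"
)

// calculateIfaceDiffSketch splits the interface lists into interfaces that
// appear only in the new configuration (to set) and interfaces that appear
// only in the old configuration (to unset).
func calculateIfaceDiffSketch(newIfs, oldIfs []*l2.BridgeDomains_BridgeDomain_Interfaces) (toSet, toUnset []*l2.BridgeDomains_BridgeDomain_Interfaces) {
	oldByName := make(map[string]struct{}, len(oldIfs))
	for _, oldIf := range oldIfs {
		oldByName[oldIf.Name] = struct{}{}
	}
	newByName := make(map[string]struct{}, len(newIfs))
	for _, newIf := range newIfs {
		newByName[newIf.Name] = struct{}{}
		if _, known := oldByName[newIf.Name]; !known {
			toSet = append(toSet, newIf)
		}
	}
	for _, oldIf := range oldIfs {
		if _, kept := newByName[oldIf.Name]; !kept {
			toUnset = append(toUnset, oldIf)
		}
	}
	return toSet, toUnset
}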
- if _, err := vppcalls.UnsetInterfacesFromBridgeDomain(bdConfig.Name, bdIdx, bdConfig.Interfaces, plugin.ifIndexes, - plugin.log, plugin.vppChan, plugin.stopwatch); err != nil { + if _, err := plugin.bdHandler.UnsetInterfacesFromBridgeDomain(bdConfig.Name, bdIdx, bdConfig.Interfaces, plugin.ifIndexes); err != nil { plugin.log.Error(err) // Try to remove bridge domain anyway } - if err := vppcalls.VppDeleteBridgeDomain(bdIdx, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.bdHandler.VppDeleteBridgeDomain(bdIdx); err != nil { return err } @@ -336,8 +331,7 @@ func (plugin *BDConfigurator) ResolveCreatedInterface(ifName string, ifIdx uint3 return nil } var bdIfs []*l2.BridgeDomains_BridgeDomain_Interfaces // Single-value - configuredIf, err := vppcalls.SetInterfacesToBridgeDomain(bd.Name, bdIdx, append(bdIfs, bdIf), plugin.ifIndexes, plugin.log, - plugin.vppChan, plugin.stopwatch) + configuredIf, err := plugin.bdHandler.SetInterfacesToBridgeDomain(bd.Name, bdIdx, append(bdIfs, bdIf), plugin.ifIndexes) if err != nil { return fmt.Errorf("error while assigning interface %s to bridge domain %s", ifName, bd.Name) } diff --git a/plugins/vpp/l2plugin/bd_config_test.go b/plugins/vpp/l2plugin/bd_config_test.go index 165be64312..70373a0be3 100644 --- a/plugins/vpp/l2plugin/bd_config_test.go +++ b/plugins/vpp/l2plugin/bd_config_test.go @@ -15,6 +15,8 @@ package l2plugin_test import ( + "testing" + "git.fd.io/govpp.git/adapter/mock" "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging" @@ -29,7 +31,6 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/model/l2" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" - "testing" ) func bdConfigTestInitialization(t *testing.T) (*vppcallmock.TestCtx, *core.Connection, ifaceidx.SwIfIndexRW, chan l2plugin.BridgeDomainStateMessage, *l2plugin.BDConfigurator, error) { diff --git a/plugins/vpp/l2plugin/data_resync.go b/plugins/vpp/l2plugin/data_resync.go index 37497448a9..a3747a0d4d 100644 --- a/plugins/vpp/l2plugin/data_resync.go +++ b/plugins/vpp/l2plugin/data_resync.go @@ -18,8 +18,6 @@ import ( "strings" "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx" - "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppdump" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) @@ -37,7 +35,7 @@ func (plugin *BDConfigurator) Resync(nbBDs []*l2.BridgeDomains_BridgeDomain) err plugin.clearMapping() // Dump current state of the VPP bridge domains - vppBDs, err := vppdump.DumpBridgeDomains(plugin.vppChan, plugin.stopwatch) + vppBDs, err := plugin.bdHandler.DumpBridgeDomains() if err != nil { return err } @@ -113,14 +111,12 @@ func (plugin *BDConfigurator) Resync(nbBDs []*l2.BridgeDomains_BridgeDomain) err } // Remove interfaces from bridge domain. 
Attempt to unset interface which does not belong to the bridge domain // does not cause an error - if _, err := vppcalls.UnsetInterfacesFromBridgeDomain(nbBD.Name, vppBDIdx, interfacesToUnset, plugin.ifIndexes, plugin.log, - plugin.vppChan, nil); err != nil { + if _, err := plugin.bdHandler.UnsetInterfacesFromBridgeDomain(nbBD.Name, vppBDIdx, interfacesToUnset, plugin.ifIndexes); err != nil { return err } // Set all new interfaces to the bridge domain // todo there is no need to calculate diff from configured interfaces, because currently all available interfaces are set here - configuredIfs, err := vppcalls.SetInterfacesToBridgeDomain(nbBD.Name, vppBDIdx, nbBD.Interfaces, plugin.ifIndexes, plugin.log, - plugin.vppChan, nil) + configuredIfs, err := plugin.bdHandler.SetInterfacesToBridgeDomain(nbBD.Name, vppBDIdx, nbBD.Interfaces, plugin.ifIndexes) if err != nil { return err } @@ -128,8 +124,7 @@ func (plugin *BDConfigurator) Resync(nbBDs []*l2.BridgeDomains_BridgeDomain) err // todo VPP does not support ARP dump, they can be only added at this time // Resolve new ARP entries for _, arpEntry := range nbBD.ArpTerminationTable { - if err := vppcalls.VppAddArpTerminationTableEntry(vppBDIdx, arpEntry.PhysAddress, arpEntry.IpAddress, - plugin.log, plugin.vppChan, nil); err != nil { + if err := plugin.bdHandler.VppAddArpTerminationTableEntry(vppBDIdx, arpEntry.PhysAddress, arpEntry.IpAddress); err != nil { plugin.log.Error(err) wasErr = err } @@ -172,7 +167,7 @@ func (plugin *FIBConfigurator) Resync(nbFIBs []*l2.FibTable_FibEntry) error { plugin.clearMapping() // Get all FIB entries configured on the VPP - vppFIBs, err := vppdump.DumpFIBTableEntries(plugin.syncChannel, plugin.stopwatch) + vppFIBs, err := plugin.fibHandler.DumpFIBTableEntries() if err != nil { return err } @@ -268,7 +263,7 @@ func (plugin *XConnectConfigurator) Resync(nbXConns []*l2.XConnectPairs_XConnect plugin.clearMapping() // Read cross connects from the VPP - vppXConns, err := vppdump.DumpXConnectPairs(plugin.vppChan, plugin.stopwatch) + vppXConns, err := plugin.xcHandler.DumpXConnectPairs() if err != nil { return err } diff --git a/plugins/vpp/l2plugin/fib_config.go b/plugins/vpp/l2plugin/fib_config.go index 680434eb2d..18508e3e00 100644 --- a/plugins/vpp/l2plugin/fib_config.go +++ b/plugins/vpp/l2plugin/fib_config.go @@ -46,7 +46,7 @@ type FIBConfigurator struct { fibIndexSeq uint32 // VPP binary api call helper - vppcalls *vppcalls.L2FibVppCalls + fibHandler vppcalls.FibVppAPI // VPP channels syncChannel govppapi.Channel @@ -57,11 +57,17 @@ type FIBConfigurator struct { } // Init goroutines, mappings, channels.. 
-func (plugin *FIBConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, bdIndexes l2idx.BDIndex, enableStopwatch bool) (err error) { +func (plugin *FIBConfigurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, + bdIndexes l2idx.BDIndex, enableStopwatch bool) (err error) { // Logger plugin.log = logger.NewLogger("-l2-fib-conf") plugin.log.Debug("Initializing L2 Bridge domains") + // Stopwatch + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("FIBConfigurator", plugin.log) + } + // Mappings plugin.ifIndexes = swIfIndexes plugin.bdIndexes = bdIndexes @@ -80,21 +86,19 @@ func (plugin *FIBConfigurator) Init(logger logging.PluginLogger, goVppMux govppm return err } - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("FIBConfigurator", plugin.log) - } - // Message compatibility if err := plugin.syncChannel.CheckMessageCompatibility(vppcalls.L2FibMessages...); err != nil { return err } // VPP calls helper object - plugin.vppcalls = vppcalls.NewL2FibVppCalls(plugin.log, plugin.asyncChannel, plugin.stopwatch) + requestChan := make(chan *vppcalls.FibLogicalReq) + if plugin.fibHandler, err = vppcalls.NewFibVppHandler(plugin.syncChannel, plugin.asyncChannel, requestChan, plugin.log, plugin.stopwatch); err != nil { + return err + } // FIB reply watcher - go plugin.vppcalls.WatchFIBReplies() + go plugin.fibHandler.WatchFIBReplies() return nil } @@ -152,7 +156,7 @@ func (plugin *FIBConfigurator) Add(fib *l2.FibTable_FibEntry, callback func(erro } plugin.log.Debugf("Configuring FIB entry %s for bridge domain %s and interface %s", fib.PhysAddress, bdIdx, ifIdx) - return plugin.vppcalls.Add(fib.PhysAddress, bdIdx, ifIdx, fib.BridgedVirtualInterface, fib.StaticConfig, + return plugin.fibHandler.Add(fib.PhysAddress, bdIdx, ifIdx, fib.BridgedVirtualInterface, fib.StaticConfig, func(err error) { // Register plugin.fibIndexes.RegisterName(fib.PhysAddress, plugin.fibIndexSeq, fib) @@ -185,7 +189,7 @@ func (plugin *FIBConfigurator) Modify(oldFib *l2.FibTable_FibEntry, plugin.log.Debugf("FIB %s cannot be removed, bridge domain %s no longer exists", oldFib.PhysAddress, oldFib.BridgeDomain) } else { - if err := plugin.vppcalls.Delete(oldFib.PhysAddress, oldBdIdx, oldIfIdx, func(err error) { + if err := plugin.fibHandler.Delete(oldFib.PhysAddress, oldBdIdx, oldIfIdx, func(err error) { plugin.fibIndexes.UnregisterName(oldFib.PhysAddress) callback(err) }); err != nil { @@ -201,7 +205,7 @@ func (plugin *FIBConfigurator) Modify(oldFib *l2.FibTable_FibEntry, return nil } - return plugin.vppcalls.Add(newFib.PhysAddress, bdIdx, ifIdx, newFib.BridgedVirtualInterface, newFib.StaticConfig, + return plugin.fibHandler.Add(newFib.PhysAddress, bdIdx, ifIdx, newFib.BridgedVirtualInterface, newFib.StaticConfig, func(err error) { plugin.fibIndexes.RegisterName(oldFib.PhysAddress, plugin.fibIndexSeq, newFib) plugin.fibIndexSeq++ @@ -232,7 +236,7 @@ func (plugin *FIBConfigurator) Delete(fib *l2.FibTable_FibEntry, callback func(e plugin.fibIndexes.UnregisterName(fib.PhysAddress) plugin.log.Debugf("FIB %s removed from mappings", fib.PhysAddress) - return plugin.vppcalls.Delete(fib.PhysAddress, bdIdx, ifIdx, func(err error) { + return plugin.fibHandler.Delete(fib.PhysAddress, bdIdx, ifIdx, func(err error) { callback(err) }) } @@ -321,7 +325,7 @@ func (plugin *FIBConfigurator) resolveRegisteredItem(callback func(error)) error if cached { continue } - if err := plugin.vppcalls.Delete(cachedFibId, bdIdx, 
ifIdx, func(err error) { + if err := plugin.fibHandler.Delete(cachedFibId, bdIdx, ifIdx, func(err error) { plugin.log.Debugf("Deleting cached obsolete FIB %s", cachedFibId) // Handle registration plugin.fibIndexes.UnregisterName(cachedFibId) @@ -346,7 +350,7 @@ func (plugin *FIBConfigurator) resolveRegisteredItem(callback func(error)) error if cached { continue } - if err := plugin.vppcalls.Add(cachedFibId, bdIdx, ifIdx, fibData.BridgedVirtualInterface, fibData.StaticConfig, func(err error) { + if err := plugin.fibHandler.Add(cachedFibId, bdIdx, ifIdx, fibData.BridgedVirtualInterface, fibData.StaticConfig, func(err error) { plugin.log.Infof("Configuring cached FIB %s", cachedFibId) // Handle registration plugin.fibIndexes.RegisterName(cachedFibId, plugin.fibIndexSeq, fibData) diff --git a/plugins/vpp/l2plugin/vppcalls/api_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/api_vppcalls.go new file mode 100644 index 0000000000..0f3a2a41e1 --- /dev/null +++ b/plugins/vpp/l2plugin/vppcalls/api_vppcalls.go @@ -0,0 +1,175 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vppcalls + +import ( + govppapi "git.fd.io/govpp.git/api" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/logging/measure" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + "github.com/ligato/vpp-agent/plugins/vpp/model/l2" +) + +// BridgeDomainVppAPI provides methods for managing bridge domains +type BridgeDomainVppAPI interface { + BridgeDomainVppWrite + BridgeDomainVppRead +} + +// BridgeDomainVppWrite provides write methods for bridge domains +type BridgeDomainVppWrite interface { + // VppAddBridgeDomain adds new bridge domain. + VppAddBridgeDomain(bdIdx uint32, bd *l2.BridgeDomains_BridgeDomain) error + // VppDeleteBridgeDomain removes existing bridge domain. + VppDeleteBridgeDomain(bdIdx uint32) error + // SetInterfacesToBridgeDomain attempts to set all provided interfaces to bridge domain. It returns a list of interfaces + // which were successfully set. + SetInterfacesToBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.BridgeDomains_BridgeDomain_Interfaces, + swIfIndices ifaceidx.SwIfIndex) (ifs []string, wasErr error) + // UnsetInterfacesFromBridgeDomain removes all interfaces from bridge domain. It returns a list of interfaces + // which were successfully unset. + UnsetInterfacesFromBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.BridgeDomains_BridgeDomain_Interfaces, + swIfIndices ifaceidx.SwIfIndex) (ifs []string, wasErr error) + // VppAddArpTerminationTableEntry creates ARP termination entry for bridge domain. 
+ VppAddArpTerminationTableEntry(bdID uint32, mac string, ip string) error
+ // VppRemoveArpTerminationTableEntry removes ARP termination entry from bridge domain.
+ VppRemoveArpTerminationTableEntry(bdID uint32, mac string, ip string) error
+}
+
+// BridgeDomainVppRead provides read methods for bridge domains
+type BridgeDomainVppRead interface {
+ // DumpBridgeDomainIDs lists all configured bridge domains. Auxiliary method for LookupFIBEntries.
+ // Returns a list of bridge domain IDs (BD IDs). The first element of the returned slice is 0, the default
+ // bridge domain to which all interfaces belong.
+ DumpBridgeDomainIDs() ([]uint32, error)
+ // DumpBridgeDomains dumps VPP bridge domain data into the northbound API data structure
+ // map indexed by bridge domain ID.
+ //
+ // LIMITATIONS:
+ // - not able to dump ArpTerminationTable - missing binary API
+ //
+ DumpBridgeDomains() (map[uint32]*BridgeDomain, error)
+}
+
+// FibVppAPI provides methods for managing FIBs
+type FibVppAPI interface {
+ FibVppWrite
+ FibVppRead
+}
+
+// FibVppWrite provides write methods for FIBs
+type FibVppWrite interface {
+ // Add creates L2 FIB table entry.
+ Add(mac string, bdID uint32, ifIdx uint32, bvi bool, static bool, callback func(error)) error
+ // Delete removes existing L2 FIB table entry.
+ Delete(mac string, bdID uint32, ifIdx uint32, callback func(error)) error
+}
+
+// FibVppRead provides read methods for FIBs
+type FibVppRead interface {
+ // DumpFIBTableEntries dumps VPP FIB table entries into the northbound API data structure
+ // map indexed by destination MAC address.
+ DumpFIBTableEntries() (map[string]*FIBTableEntry, error)
+ // WatchFIBReplies handles L2 FIB add/del requests.
+ WatchFIBReplies()
+}
+
+// XConnectVppAPI provides methods for managing cross connects
+type XConnectVppAPI interface {
+ XConnectVppWrite
+ XConnectVppRead
+}
+
+// XConnectVppWrite provides write methods for cross connects
+type XConnectVppWrite interface {
+ // AddL2XConnect creates xConnect between two existing interfaces.
+ AddL2XConnect(rxIfIdx uint32, txIfIdx uint32) error
+ // DeleteL2XConnect removes xConnect between two interfaces.
+ DeleteL2XConnect(rxIfIdx uint32, txIfIdx uint32) error
+}
+
+// XConnectVppRead provides read methods for cross connects
+type XConnectVppRead interface {
+ // DumpXConnectPairs dumps VPP xconnect pair data into the northbound API data structure
+ // map indexed by rx interface index.
+ DumpXConnectPairs() (map[uint32]*XConnectPairs, error) +} + +// bridgeDomainVppHandler is accessor for bridge domain-related vppcalls methods +type bridgeDomainVppHandler struct { + stopwatch *measure.Stopwatch + callsChannel govppapi.Channel + log logging.Logger +} + +// fibVppHandler is accessor for FIB-related vppcalls methods +type fibVppHandler struct { + stopwatch *measure.Stopwatch + syncCallsChannel govppapi.Channel + asyncCallsChannel govppapi.Channel + requestChan chan *FibLogicalReq + log logging.Logger +} + +// xConnectVppHandler is accessor for cross-connect-related vppcalls methods +type xConnectVppHandler struct { + stopwatch *measure.Stopwatch + callsChannel govppapi.Channel + log logging.Logger +} + +// NewBridgeDomainVppHandler creates new instance of bridge domain vppcalls handler +func NewBridgeDomainVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*bridgeDomainVppHandler, error) { + handler := &bridgeDomainVppHandler{ + callsChannel: callsChan, + stopwatch: stopwatch, + log: log, + } + if err := handler.callsChannel.CheckMessageCompatibility(BridgeDomainMessages...); err != nil { + return nil, err + } + + return handler, nil +} + +// NewFibVppHandler creates new instance of FIB vppcalls handler +func NewFibVppHandler(syncChan, asyncChan govppapi.Channel, reqChan chan *FibLogicalReq, log logging.Logger, stopwatch *measure.Stopwatch) (*fibVppHandler, error) { + handler := &fibVppHandler{ + syncCallsChannel: syncChan, + asyncCallsChannel: asyncChan, + requestChan: reqChan, + stopwatch: stopwatch, + log: log, + } + if err := handler.syncCallsChannel.CheckMessageCompatibility(L2FibMessages...); err != nil { + return nil, err + } + + return handler, nil +} + +// NewXConnectVppHandler creates new instance of cross connect vppcalls handler +func NewXConnectVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*xConnectVppHandler, error) { + handler := &xConnectVppHandler{ + callsChannel: callsChan, + stopwatch: stopwatch, + log: log, + } + if err := handler.callsChannel.CheckMessageCompatibility(XConnectMessages...); err != nil { + return nil, err + } + + return handler, nil +} diff --git a/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls.go index 942c7eb0a2..5285a602c1 100644 --- a/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls.go @@ -19,16 +19,14 @@ import ( "net" "time" - govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" ) -func callBdIPMacAddDel(isAdd bool, bdID uint32, mac string, ip string, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *bridgeDomainVppHandler) callBdIPMacAddDel(isAdd bool, bdID uint32, mac string, ip string) error { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.BdIPMacAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.BdIPMacAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &l2ba.BdIPMacAddDel{ @@ -61,7 +59,7 @@ func callBdIPMacAddDel(isAdd bool, bdID uint32, mac string, ip string, vppChan g } reply := &l2ba.BdIPMacAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -71,32 +69,28 @@ 
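The new api_vppcalls.go deliberately splits every handler API into a write half and a read half. The payoff is that read-only consumers, such as the REST dump handlers or resync, can depend on the narrow interface alone. A short hypothetical example, assuming only the interfaces defined above:

package example

import (
	"github.com/ligato/cn-infra/logging"
	"github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls"
)

// logBridgeDomainIDs needs only the read half of the bridge domain API, so it
// asks for BridgeDomainVppRead; any full BridgeDomainVppAPI value satisfies it.
func logBridgeDomainIDs(rd vppcalls.BridgeDomainVppRead, log logging.Logger) error {
	ids, err := rd.DumpBridgeDomainIDs()
	if err != nil {
		return err
	}
	// The first element is always 0, the default bridge domain.
	log.Infof("bridge domains present in VPP: %v", ids)
	return nil
}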
func callBdIPMacAddDel(isAdd bool, bdID uint32, mac string, ip string, vppChan g return nil } -// VppAddArpTerminationTableEntry creates ARP termination entry for bridge domain. -func VppAddArpTerminationTableEntry(bdID uint32, mac string, ip string, log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - log.Info("Adding ARP termination entry") +func (handler *bridgeDomainVppHandler) VppAddArpTerminationTableEntry(bdID uint32, mac string, ip string) error { + handler.log.Info("Adding ARP termination entry") - err := callBdIPMacAddDel(true, bdID, mac, ip, vppChan, stopwatch) + err := handler.callBdIPMacAddDel(true, bdID, mac, ip) if err != nil { return err } - log.WithFields(logging.Fields{"bdID": bdID, "MAC": mac, "IP": ip}). - Debug("ARP termination entry added") + handler.log.WithFields(logging.Fields{"bdID": bdID, "MAC": mac, "IP": ip}).Debug("ARP termination entry added") return nil } -// VppRemoveArpTerminationTableEntry removes ARP termination entry from bridge domain -func VppRemoveArpTerminationTableEntry(bdID uint32, mac string, ip string, log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - log.Info("Removing ARP termination entry") +func (handler *bridgeDomainVppHandler) VppRemoveArpTerminationTableEntry(bdID uint32, mac string, ip string) error { + handler.log.Info("Removing ARP termination entry") - err := callBdIPMacAddDel(false, bdID, mac, ip, vppChan, stopwatch) + err := handler.callBdIPMacAddDel(false, bdID, mac, ip) if err != nil { return err } - log.WithFields(logging.Fields{"bdID": bdID, "MAC": mac, "IP": ip}). - Debug("ARP termination entry removed") + handler.log.WithFields(logging.Fields{"bdID": bdID, "MAC": mac, "IP": ip}).Debug("ARP termination entry removed") return nil } diff --git a/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go index 79bd7b9a49..1a4a4cb114 100644 --- a/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go @@ -17,24 +17,18 @@ package vppcalls_test import ( "testing" - "github.com/ligato/cn-infra/logging/logrus" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" - "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) -var dummyLogger = logrus.NewLogger("dummy") - func TestVppAddArpTerminationTableEntry(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) - err := vppcalls.VppAddArpTerminationTableEntry( - 4, "FF:FF:FF:FF:FF:FF", "192.168.4.4", - dummyLogger, ctx.MockChannel, nil) + err := bdHandler.VppAddArpTerminationTableEntry( + 4, "FF:FF:FF:FF:FF:FF", "192.168.4.4") Expect(err).ShouldNot(HaveOccurred()) Expect(ctx.MockChannel.Msg).To(Equal(&l2ba.BdIPMacAddDel{ @@ -47,14 +41,12 @@ func TestVppAddArpTerminationTableEntry(t *testing.T) { } func TestVppAddArpTerminationTableEntryIPv6(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) - err := vppcalls.VppAddArpTerminationTableEntry( - 4, "FF:FF:FF:FF:FF:FF", "2001:db9::54", - dummyLogger, ctx.MockChannel, nil) + err := bdHandler.VppAddArpTerminationTableEntry(4, "FF:FF:FF:FF:FF:FF", "2001:db9::54") Expect(err).ShouldNot(HaveOccurred()) Expect(ctx.MockChannel.Msg).To(Equal(&l2ba.BdIPMacAddDel{ @@ -67,14 +59,12 @@ func TestVppAddArpTerminationTableEntryIPv6(t *testing.T) { } func TestVppRemoveArpTerminationTableEntry(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) - err := vppcalls.VppRemoveArpTerminationTableEntry( - 4, "FF:FF:FF:FF:FF:FF", "192.168.4.4", - dummyLogger, ctx.MockChannel, nil) + err := bdHandler.VppRemoveArpTerminationTableEntry(4, "FF:FF:FF:FF:FF:FF", "192.168.4.4") Expect(err).ShouldNot(HaveOccurred()) Expect(ctx.MockChannel.Msg).To(Equal(&l2ba.BdIPMacAddDel{ @@ -87,56 +77,44 @@ func TestVppRemoveArpTerminationTableEntry(t *testing.T) { } func TestVppArpTerminationTableEntryMacError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) - err := vppcalls.VppAddArpTerminationTableEntry( - 4, "in:va:li:d:ma:c", "192.168.4.4", - dummyLogger, ctx.MockChannel, nil) + err := bdHandler.VppAddArpTerminationTableEntry(4, "in:va:li:d:ma:c", "192.168.4.4") Expect(err).Should(HaveOccurred()) - err = vppcalls.VppRemoveArpTerminationTableEntry( - 4, "in:va:li:d:ma:c", "192.168.4.4", - dummyLogger, ctx.MockChannel, nil) + err = bdHandler.VppRemoveArpTerminationTableEntry(4, "in:va:li:d:ma:c", "192.168.4.4") Expect(err).Should(HaveOccurred()) } func TestVppArpTerminationTableEntryIpError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) - err := vppcalls.VppAddArpTerminationTableEntry( - 4, "FF:FF:FF:FF:FF:FF", "", - dummyLogger, ctx.MockChannel, nil) + err := bdHandler.VppAddArpTerminationTableEntry(4, "FF:FF:FF:FF:FF:FF", "") Expect(err).Should(HaveOccurred()) - err = vppcalls.VppRemoveArpTerminationTableEntry( - 4, "FF:FF:FF:FF:FF:FF", "", - dummyLogger, ctx.MockChannel, nil) + err = bdHandler.VppRemoveArpTerminationTableEntry(4, "FF:FF:FF:FF:FF:FF", "") Expect(err).Should(HaveOccurred()) } func TestVppArpTerminationTableEntryError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{ Retval: 1, }) - err := vppcalls.VppAddArpTerminationTableEntry( - 4, 
"FF:FF:FF:FF:FF:FF", "192.168.4.4", - dummyLogger, ctx.MockChannel, nil) + err := bdHandler.VppAddArpTerminationTableEntry(4, "FF:FF:FF:FF:FF:FF", "192.168.4.4") Expect(err).Should(HaveOccurred()) ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - err = vppcalls.VppRemoveArpTerminationTableEntry( - 4, "FF:FF:FF:FF:FF:FF", "192.168.4.4", - dummyLogger, ctx.MockChannel, nil) + err = bdHandler.VppRemoveArpTerminationTableEntry(4, "FF:FF:FF:FF:FF:FF", "192.168.4.4") Expect(err).Should(HaveOccurred()) } diff --git a/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls.go index e55c538b5d..74eb037bc8 100644 --- a/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls.go @@ -19,7 +19,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) @@ -34,10 +33,9 @@ var BridgeDomainMessages = []govppapi.Message{ &l2ba.SwInterfaceSetL2BridgeReply{}, } -// VppAddBridgeDomain adds new bridge domain. -func VppAddBridgeDomain(bdIdx uint32, bd *l2.BridgeDomains_BridgeDomain, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *bridgeDomainVppHandler) VppAddBridgeDomain(bdIdx uint32, bd *l2.BridgeDomains_BridgeDomain) error { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.BridgeDomainAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.BridgeDomainAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &l2ba.BridgeDomainAddDel{ @@ -53,7 +51,7 @@ func VppAddBridgeDomain(bdIdx uint32, bd *l2.BridgeDomains_BridgeDomain, vppChan } reply := &l2ba.BridgeDomainAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -63,10 +61,9 @@ func VppAddBridgeDomain(bdIdx uint32, bd *l2.BridgeDomains_BridgeDomain, vppChan return nil } -// VppDeleteBridgeDomain removes existing bridge domain. 
-func VppDeleteBridgeDomain(bdIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *bridgeDomainVppHandler) VppDeleteBridgeDomain(bdIdx uint32) error { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.BridgeDomainAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.BridgeDomainAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &l2ba.BridgeDomainAddDel{ @@ -75,7 +72,7 @@ func VppDeleteBridgeDomain(bdIdx uint32, vppChan govppapi.Channel, stopwatch *me } reply := &l2ba.BridgeDomainAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go index b3ef357e18..46b4037772 100644 --- a/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go @@ -17,6 +17,7 @@ package vppcalls_test import ( "testing" + "github.com/ligato/cn-infra/logging/logrus" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" @@ -83,51 +84,59 @@ var deleteTestDataOutBd *l2ba.BridgeDomainAddDel = &l2ba.BridgeDomainAddDel{ } func TestVppAddBridgeDomain(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - err := vppcalls.VppAddBridgeDomain(dummyBridgeDomain, createTestDataInBD, ctx.MockChannel, nil) + err := bdHandler.VppAddBridgeDomain(dummyBridgeDomain, createTestDataInBD) Expect(err).ShouldNot(HaveOccurred()) Expect(ctx.MockChannel.Msg).To(Equal(createTestDataOutBD)) } func TestVppAddBridgeDomainError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{Retval: 1}) ctx.MockVpp.MockReply(&l2ba.SwInterfaceSetL2Bridge{}) - err := vppcalls.VppAddBridgeDomain(dummyBridgeDomain, createTestDataInBD, ctx.MockChannel, nil) + err := bdHandler.VppAddBridgeDomain(dummyBridgeDomain, createTestDataInBD) Expect(err).Should(HaveOccurred()) - err = vppcalls.VppAddBridgeDomain(dummyBridgeDomain, createTestDataInBD, ctx.MockChannel, nil) + err = bdHandler.VppAddBridgeDomain(dummyBridgeDomain, createTestDataInBD) Expect(err).Should(HaveOccurred()) } func TestVppDeleteBridgeDomain(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - err := vppcalls.VppDeleteBridgeDomain(dummyBridgeDomain, ctx.MockChannel, nil) + err := bdHandler.VppDeleteBridgeDomain(dummyBridgeDomain) Expect(err).ShouldNot(HaveOccurred()) Expect(ctx.MockChannel.Msg).To(Equal(deleteTestDataOutBd)) } func TestVppDeleteBridgeDomainError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{Retval: 1}) ctx.MockVpp.MockReply(&l2ba.SwInterfaceSetL2Bridge{}) - err := vppcalls.VppDeleteBridgeDomain(dummyBridgeDomain, ctx.MockChannel, nil) + err := bdHandler.VppDeleteBridgeDomain(dummyBridgeDomain) Expect(err).Should(HaveOccurred()) - err = vppcalls.VppDeleteBridgeDomain(dummyBridgeDomain, 
ctx.MockChannel, nil) + err = bdHandler.VppDeleteBridgeDomain(dummyBridgeDomain) Expect(err).Should(HaveOccurred()) } + +func bdTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.BridgeDomainVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + bdHandler, err := vppcalls.NewBridgeDomainVppHandler(ctx.MockChannel, log, nil) + Expect(err).To(BeNil()) + return ctx, bdHandler +} diff --git a/plugins/vpp/l2plugin/vppcalls/doc.go b/plugins/vpp/l2plugin/vppcalls/doc.go index 2406999863..eb649f3844 100644 --- a/plugins/vpp/l2plugin/vppcalls/doc.go +++ b/plugins/vpp/l2plugin/vppcalls/doc.go @@ -1,3 +1,3 @@ // Package vppcalls contains wrappers over VPP binary APIs for bridge-domains, -// and L2 FIBs and XConnect pairs. +// and L2 FIBs and XConnect pairs and helpers for dumping them. package vppcalls diff --git a/plugins/vpp/l2plugin/vppdump/dump_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go similarity index 72% rename from plugins/vpp/l2plugin/vppdump/dump_vppcalls.go rename to plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go index 57e3475501..de4882ba3b 100644 --- a/plugins/vpp/l2plugin/vppdump/dump_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go @@ -12,30 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls import ( "bytes" "net" "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" l2nb "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) -// DumpBridgeDomainIDs lists all configured bridge domains. Auxiliary method for LookupFIBEntries. -// returns list of bridge domain IDs (BD IDs). First element of returned slice is 0. It is default BD to which all -// interfaces belong -func DumpBridgeDomainIDs(vppChannel govppapi.Channel, stopwatch *measure.Stopwatch) ([]uint32, error) { +func (handler *bridgeDomainVppHandler) DumpBridgeDomainIDs() ([]uint32, error) { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.BridgeDomainDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.BridgeDomainDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &l2ba.BridgeDomainDump{BdID: ^uint32(0)} activeDomains := make([]uint32, 1) - reqCtx := vppChannel.SendMultiRequest(req) + reqCtx := handler.callsChannel.SendMultiRequest(req) for { msg := &l2ba.BridgeDomainDetails{} stop, err := reqCtx.ReceiveReply(msg) @@ -64,22 +59,16 @@ type BridgeDomainInterface struct { l2nb.BridgeDomains_BridgeDomain_Interfaces } -// DumpBridgeDomains dumps VPP bridge domain data into the northbound API data structure -// map indexed by bridge domain ID. -// -// LIMITATIONS: -// - not able to dump ArpTerminationTable - missing binary API -// -func DumpBridgeDomains(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (map[uint32]*BridgeDomain, error) { +func (handler *bridgeDomainVppHandler) DumpBridgeDomains() (map[uint32]*BridgeDomain, error) { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.BridgeDomainDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.BridgeDomainDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) // map for the resulting BDs bds := make(map[uint32]*BridgeDomain) // First, dump all interfaces to create initial data. 
- reqCtx := vppChan.SendMultiRequest(&l2ba.BridgeDomainDump{BdID: ^uint32(0)}) + reqCtx := handler.callsChannel.SendMultiRequest(&l2ba.BridgeDomainDump{BdID: ^uint32(0)}) for { bdDetails := &l2ba.BridgeDomainDetails{} @@ -124,17 +113,15 @@ type FIBTableEntry struct { l2nb.FibTable_FibEntry } -// DumpFIBTableEntries dumps VPP FIB table entries into the northbound API data structure -// map indexed by destination MAC address. -func DumpFIBTableEntries(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (map[string]*FIBTableEntry, error) { +func (handler *fibVppHandler) DumpFIBTableEntries() (map[string]*FIBTableEntry, error) { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.L2FibTableDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.L2FibTableDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) // map for the resulting FIBs fibs := make(map[string]*FIBTableEntry) - reqCtx := vppChan.SendMultiRequest(&l2ba.L2FibTableDump{BdID: ^uint32(0)}) + reqCtx := handler.syncCallsChannel.SendMultiRequest(&l2ba.L2FibTableDump{BdID: ^uint32(0)}) for { fibDetails := &l2ba.L2FibTableDetails{} stop, err := reqCtx.ReceiveReply(fibDetails) @@ -174,17 +161,15 @@ type XConnectPairs struct { TransmitInterfaceSwIfIdx uint32 `json:"transmit_interface_sw_if_idx"` } -// DumpXConnectPairs dumps VPP xconnect pair data into the northbound API data structure -// map indexed by rx interface index. -func DumpXConnectPairs(vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (map[uint32]*XConnectPairs, error) { +func (handler *xConnectVppHandler) DumpXConnectPairs() (map[uint32]*XConnectPairs, error) { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.L2XconnectDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.L2XconnectDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) // map for the resulting xconnect pairs xpairs := make(map[uint32]*XConnectPairs) - reqCtx := vppChan.SendMultiRequest(&l2ba.L2XconnectDump{}) + reqCtx := handler.callsChannel.SendMultiRequest(&l2ba.L2XconnectDump{}) for { pairs := &l2ba.L2XconnectDetails{} stop, err := reqCtx.ReceiveReply(pairs) diff --git a/plugins/vpp/l2plugin/vppdump/dump_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go similarity index 87% rename from plugins/vpp/l2plugin/vppdump/dump_vppcalls_test.go rename to plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go index 2752cbd519..f65a2289e2 100644 --- a/plugins/vpp/l2plugin/vppdump/dump_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls_test import ( "testing" @@ -20,8 +20,8 @@ import ( govppapi "git.fd.io/govpp.git/api" "git.fd.io/govpp.git/core/bin_api/vpe" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" l2nb "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) @@ -44,9 +44,9 @@ var testDataInMessagesBDs = []govppapi.Message{ }, } -var testDataOutMessage = []*BridgeDomain{ +var testDataOutMessage = []*vppcalls.BridgeDomain{ { - Interfaces: []*BridgeDomainInterface{ + Interfaces: []*vppcalls.BridgeDomainInterface{ {SwIfIndex: 5}, {SwIfIndex: 7}, }, @@ -58,7 +58,7 @@ var testDataOutMessage = []*BridgeDomain{ ArpTermination: true, MacAge: 140}, }, { - Interfaces: []*BridgeDomainInterface{ + Interfaces: []*vppcalls.BridgeDomainInterface{ {SwIfIndex: 5}, {SwIfIndex: 8}, }, @@ -76,19 +76,19 @@ var testDataOutMessage = []*BridgeDomain{ // - 2 bridge domains + 1 default in VPP // TestDumpBridgeDomainIDs tests DumpBridgeDomainIDs method func TestDumpBridgeDomainIDs(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(testDataInMessagesBDs...) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - activeDomains, err := DumpBridgeDomainIDs(ctx.MockChannel, nil) + activeDomains, err := bdHandler.DumpBridgeDomainIDs() Expect(err).To(BeNil()) Expect(activeDomains).To(Equal([]uint32{0, 4, 5})) ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - _, err = DumpBridgeDomainIDs(ctx.MockChannel, nil) + _, err = bdHandler.DumpBridgeDomainIDs() Expect(err).Should(HaveOccurred()) } @@ -96,13 +96,13 @@ func TestDumpBridgeDomainIDs(t *testing.T) { // - 2 bridge domains + 1 default in VPP // TestDumpBridgeDomains tests DumpBridgeDomains method func TestDumpBridgeDomains(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(testDataInMessagesBDs...) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - bridgeDomains, err := DumpBridgeDomains(ctx.MockChannel, nil) + bridgeDomains, err := bdHandler.DumpBridgeDomains() Expect(err).To(BeNil()) Expect(bridgeDomains).To(HaveLen(2)) @@ -110,7 +110,7 @@ func TestDumpBridgeDomains(t *testing.T) { Expect(bridgeDomains[5]).To(Equal(testDataOutMessage[1])) ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - _, err = DumpBridgeDomains(ctx.MockChannel, nil) + _, err = bdHandler.DumpBridgeDomains() Expect(err).Should(HaveOccurred()) } @@ -127,7 +127,7 @@ var testDataInMessagesFIBs = []govppapi.Message{ }, } -var testDataOutFIBs = []*FIBTableEntry{ +var testDataOutFIBs = []*vppcalls.FIBTableEntry{ { BridgeDomainIdx: 10, OutgoingInterfaceSwIfIdx: 1, @@ -154,20 +154,20 @@ var testDataOutFIBs = []*FIBTableEntry{ // - 2 FIB entries in VPP // TestDumpFIBTableEntries tests DumpFIBTableEntries method func TestDumpFIBTableEntries(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, fibHandler := fibTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(testDataInMessagesFIBs...) 
ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - fibTable, err := DumpFIBTableEntries(ctx.MockChannel, nil) + fibTable, err := fibHandler.DumpFIBTableEntries() Expect(err).To(BeNil()) Expect(fibTable).To(HaveLen(2)) Expect(fibTable["aa:aa:aa:aa:aa:aa"]).To(Equal(testDataOutFIBs[0])) Expect(fibTable["bb:bb:bb:bb:bb:bb"]).To(Equal(testDataOutFIBs[1])) ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - _, err = DumpFIBTableEntries(ctx.MockChannel, nil) + _, err = fibHandler.DumpFIBTableEntries() Expect(err).Should(HaveOccurred()) } @@ -176,7 +176,7 @@ var testDataInXConnect = []govppapi.Message{ &l2ba.L2XconnectDetails{3, 4}, } -var testDataOutXconnect = []*XConnectPairs{ +var testDataOutXconnect = []*vppcalls.XConnectPairs{ {1, 2}, {3, 4}, } @@ -185,13 +185,13 @@ var testDataOutXconnect = []*XConnectPairs{ // - 2 Xconnect entries in VPP // TestDumpXConnectPairs tests DumpXConnectPairs method func TestDumpXConnectPairs(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, xcHandler := xcTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(testDataInXConnect...) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - xConnectPairs, err := DumpXConnectPairs(ctx.MockChannel, nil) + xConnectPairs, err := xcHandler.DumpXConnectPairs() Expect(err).To(BeNil()) Expect(xConnectPairs).To(HaveLen(2)) @@ -199,7 +199,7 @@ func TestDumpXConnectPairs(t *testing.T) { Expect(xConnectPairs[3]).To(Equal(testDataOutXconnect[1])) ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - _, err = DumpXConnectPairs(ctx.MockChannel, nil) + _, err = xcHandler.DumpXConnectPairs() Expect(err).Should(HaveOccurred()) } diff --git a/plugins/vpp/l2plugin/vppcalls/interface_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/interface_vppcalls.go index 2c74074ad3..97338cf0ee 100644 --- a/plugins/vpp/l2plugin/vppcalls/interface_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/interface_vppcalls.go @@ -18,25 +18,20 @@ import ( "fmt" "time" - govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) -// SetInterfacesToBridgeDomain attempts to set all provided interfaces to bridge domain. It returns a list of interfaces -// which were successfully set. -func SetInterfacesToBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.BridgeDomains_BridgeDomain_Interfaces, - swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (ifs []string, wasErr error) { - +func (handler *bridgeDomainVppHandler) SetInterfacesToBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.BridgeDomains_BridgeDomain_Interfaces, + swIfIndices ifaceidx.SwIfIndex) (ifs []string, wasErr error) { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.SwInterfaceSetL2Bridge{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.SwInterfaceSetL2Bridge{}).LogTimeEntry(time.Since(t)) }(time.Now()) if len(bdIfs) == 0 { - log.Debugf("Bridge domain %v has no new interface to set", bdName) + handler.log.Debugf("Bridge domain %v has no new interface to set", bdName) return nil, nil } @@ -44,14 +39,14 @@ func SetInterfacesToBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.Bridge // Verify that interface exists, otherwise skip it. 
ifIdx, _, found := swIfIndices.LookupIdx(bdIf.Name) if !found { - log.Debugf("Required bridge domain %v interface %v not found", bdName, bdIf.Name) + handler.log.Debugf("Required bridge domain %v interface %v not found", bdName, bdIf.Name) continue } - if err := addDelInterfaceToBridgeDomain(bdName, bdIdx, bdIf, ifIdx, log, vppChan, true); err != nil { + if err := handler.addDelInterfaceToBridgeDomain(bdName, bdIdx, bdIf, ifIdx, true); err != nil { wasErr = err - log.Error(wasErr) + handler.log.Error(wasErr) } else { - log.WithFields(logging.Fields{"Interface": bdIf.Name, "BD": bdName}).Debug("Interface set to bridge domain") + handler.log.WithFields(logging.Fields{"Interface": bdIf.Name, "BD": bdName}).Debug("Interface set to bridge domain") ifs = append(ifs, bdIf.Name) } } @@ -59,17 +54,15 @@ func SetInterfacesToBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.Bridge return ifs, wasErr } -// UnsetInterfacesFromBridgeDomain removes all interfaces from bridge domain. It returns a list of interfaces -// which were successfully unset. -func UnsetInterfacesFromBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.BridgeDomains_BridgeDomain_Interfaces, - swIfIndices ifaceidx.SwIfIndex, log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (ifs []string, wasErr error) { +func (handler *bridgeDomainVppHandler) UnsetInterfacesFromBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.BridgeDomains_BridgeDomain_Interfaces, + swIfIndices ifaceidx.SwIfIndex) (ifs []string, wasErr error) { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.SwInterfaceSetL2Bridge{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.SwInterfaceSetL2Bridge{}).LogTimeEntry(time.Since(t)) }(time.Now()) if len(bdIfs) == 0 { - log.Debugf("Bridge domain %v has no obsolete interface to unset", bdName) + handler.log.Debugf("Bridge domain %v has no obsolete interface to unset", bdName) return nil, nil } @@ -77,14 +70,14 @@ func UnsetInterfacesFromBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.Br // Verify that interface exists, otherwise skip it. 
ifIdx, _, found := swIfIndices.LookupIdx(bdIf.Name) if !found { - log.Debugf("Required bridge domain %v interface %v not found", bdName, bdIf.Name) + handler.log.Debugf("Required bridge domain %v interface %v not found", bdName, bdIf.Name) continue } - if err := addDelInterfaceToBridgeDomain(bdName, bdIdx, bdIf, ifIdx, log, vppChan, false); err != nil { + if err := handler.addDelInterfaceToBridgeDomain(bdName, bdIdx, bdIf, ifIdx, false); err != nil { wasErr = err - log.Error(wasErr) + handler.log.Error(wasErr) } else { - log.WithFields(logging.Fields{"Interface": bdIf.Name, "BD": bdName}).Debug("Interface unset from bridge domain") + handler.log.WithFields(logging.Fields{"Interface": bdIf.Name, "BD": bdName}).Debug("Interface unset from bridge domain") ifs = append(ifs, bdIf.Name) } } @@ -92,8 +85,8 @@ func UnsetInterfacesFromBridgeDomain(bdName string, bdIdx uint32, bdIfs []*l2.Br return ifs, wasErr } -func addDelInterfaceToBridgeDomain(bdName string, bdIdx uint32, bdIf *l2.BridgeDomains_BridgeDomain_Interfaces, - ifIdx uint32, log logging.Logger, vppChan govppapi.Channel, add bool) error { +func (handler *bridgeDomainVppHandler) addDelInterfaceToBridgeDomain(bdName string, bdIdx uint32, bdIf *l2.BridgeDomains_BridgeDomain_Interfaces, + ifIdx uint32, add bool) error { req := &l2ba.SwInterfaceSetL2Bridge{ BdID: bdIdx, RxSwIfIndex: ifIdx, @@ -106,11 +99,11 @@ func addDelInterfaceToBridgeDomain(bdName string, bdIdx uint32, bdIf *l2.BridgeD // Set as BVI. if bdIf.BridgedVirtualInterface { req.Bvi = 1 - log.Debugf("Interface %v set as BVI", bdIf.Name) + handler.log.Debugf("Interface %v set as BVI", bdIf.Name) } reply := &l2ba.SwInterfaceSetL2BridgeReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return fmt.Errorf("error while assigning/removing interface %v to bd %v: %v", bdIf.Name, bdName, err) } if reply.Retval != 0 { diff --git a/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go index 0572a9b7dc..8a8cd4a885 100644 --- a/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go @@ -17,19 +17,16 @@ package vppcalls_test import ( "testing" - "github.com/ligato/vpp-agent/tests/vppcallmock" - "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/idxvpp/nametoidx" l2Api "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" - "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" . 
"github.com/onsi/gomega" ) func TestSetInterfacesToBridgeDomain(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -41,7 +38,7 @@ func TestSetInterfacesToBridgeDomain(t *testing.T) { swIfIndexes.RegisterName("if2", 2, nil) swIfIndexes.RegisterName("if3", 3, nil) - configured, err := vppcalls.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", BridgedVirtualInterface: true, @@ -57,7 +54,7 @@ func TestSetInterfacesToBridgeDomain(t *testing.T) { BridgedVirtualInterface: false, SplitHorizonGroup: 2, }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) Expect(err).To(BeNil()) Expect(len(ctx.MockChannel.Msgs)).To(BeEquivalentTo(3)) @@ -78,13 +75,13 @@ func TestSetInterfacesToBridgeDomain(t *testing.T) { } func TestSetInterfacesToBridgeDomainNoInterfaceToSet(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) - configured, err := vppcalls.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{}, - swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + configured, err := bdHandler.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{}, + swIfIndexes) Expect(err).To(BeNil()) Expect(len(ctx.MockChannel.Msgs)).To(BeEquivalentTo(0)) @@ -92,7 +89,7 @@ func TestSetInterfacesToBridgeDomainNoInterfaceToSet(t *testing.T) { } func TestSetInterfacesToBridgeDomainMissingInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -102,14 +99,14 @@ func TestSetInterfacesToBridgeDomainMissingInterface(t *testing.T) { swIfIndexes.RegisterName("if1", 1, nil) // Metadata are not required for test purpose // Interface "if2" is not registered - configured, err := vppcalls.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", }, { Name: "if2", }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) Expect(err).To(BeNil()) Expect(len(ctx.MockChannel.Msgs)).To(BeEquivalentTo(1)) @@ -117,7 +114,7 @@ func TestSetInterfacesToBridgeDomainMissingInterface(t *testing.T) { } func TestSetInterfacesToBridgeDomainError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2Bridge{}) @@ -125,18 +122,18 @@ func TestSetInterfacesToBridgeDomainError(t *testing.T) { swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) swIfIndexes.RegisterName("if1", 1, nil) // Metadata are not required for test purpose - configured, err := vppcalls.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) 
Expect(err).ToNot(BeNil()) Expect(configured).To(BeNil()) } func TestSetInterfacesToBridgeDomainRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{ @@ -146,18 +143,18 @@ func TestSetInterfacesToBridgeDomainRetval(t *testing.T) { swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) swIfIndexes.RegisterName("if1", 1, nil) // Metadata are not required for test purpose - configured, err := vppcalls.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.SetInterfacesToBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) Expect(err).ToNot(BeNil()) Expect(configured).To(BeNil()) } func TestUnsetInterfacesFromBridgeDomain(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -169,7 +166,7 @@ func TestUnsetInterfacesFromBridgeDomain(t *testing.T) { swIfIndexes.RegisterName("if2", 2, nil) swIfIndexes.RegisterName("if3", 3, nil) - configured, err := vppcalls.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", SplitHorizonGroup: 0, @@ -182,7 +179,7 @@ func TestUnsetInterfacesFromBridgeDomain(t *testing.T) { Name: "if3", SplitHorizonGroup: 2, }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) Expect(err).To(BeNil()) Expect(len(ctx.MockChannel.Msgs)).To(BeEquivalentTo(3)) @@ -198,13 +195,13 @@ func TestUnsetInterfacesFromBridgeDomain(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomainNoInterfaceToUnset(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) - configured, err := vppcalls.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{}, - swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + configured, err := bdHandler.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{}, + swIfIndexes) Expect(err).To(BeNil()) Expect(len(ctx.MockChannel.Msgs)).To(BeEquivalentTo(0)) @@ -212,7 +209,7 @@ func TestUnsetInterfacesFromBridgeDomainNoInterfaceToUnset(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomainMissingInterface(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -222,14 +219,14 @@ func TestUnsetInterfacesFromBridgeDomainMissingInterface(t *testing.T) { swIfIndexes.RegisterName("if1", 1, nil) // Metadata are not required for test purpose // Interface "if2" is not registered - configured, err := vppcalls.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", }, { Name: "if2", }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) Expect(err).To(BeNil()) 
Expect(len(ctx.MockChannel.Msgs)).To(BeEquivalentTo(1)) @@ -237,7 +234,7 @@ func TestUnsetInterfacesFromBridgeDomainMissingInterface(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomainError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2Bridge{}) @@ -245,18 +242,18 @@ func TestUnsetInterfacesFromBridgeDomainError(t *testing.T) { swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) swIfIndexes.RegisterName("if1", 1, nil) // Metadata are not required for test purpose - configured, err := vppcalls.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) Expect(err).ToNot(BeNil()) Expect(configured).To(BeNil()) } func TestUnsetInterfacesFromBridgeDomainRetval(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, bdHandler := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{ @@ -266,11 +263,11 @@ func TestUnsetInterfacesFromBridgeDomainRetval(t *testing.T) { swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) swIfIndexes.RegisterName("if1", 1, nil) // Metadata are not required for test purpose - configured, err := vppcalls.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ + configured, err := bdHandler.UnsetInterfacesFromBridgeDomain("bd1", 1, []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "if1", }, - }, swIfIndexes, logrus.DefaultLogger(), ctx.MockChannel, nil) + }, swIfIndexes) Expect(err).ToNot(BeNil()) Expect(configured).To(BeNil()) diff --git a/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls.go index 42e82ffa65..782bf9b976 100644 --- a/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls.go @@ -21,7 +21,6 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" ) @@ -47,29 +46,10 @@ type FibLogicalReq struct { callback func(error) } -// L2FibVppCalls aggregates vpp calls related to l2 fib. -type L2FibVppCalls struct { - log logging.Logger - vppChan govppapi.Channel - stopwatch *measure.Stopwatch - requestChan chan *FibLogicalReq -} - -// NewL2FibVppCalls is a constructor. -func NewL2FibVppCalls(log logging.Logger, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) *L2FibVppCalls { - return &L2FibVppCalls{ - log: log, - vppChan: vppChan, - stopwatch: stopwatch, - requestChan: make(chan *FibLogicalReq), - } -} - -// Add creates L2 FIB table entry. 
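The removed constructor above is superseded by an unexported handler hidden behind the FibVppAPI interface. Its definition is not part of the hunks shown in this section, but the field set can be inferred from the handler.log, handler.requestChan, handler.asyncCallsChannel and handler.stopwatch accesses below, and from the NewFibVppHandler(callsChan, asyncChan, requestChan, log, stopwatch) call in fibTestSetup later in this patch. A sketch under those assumptions, not the verbatim upstream definition:

```go
package vppcalls

import (
	govppapi "git.fd.io/govpp.git/api"
	"github.com/ligato/cn-infra/logging"
	"github.com/ligato/cn-infra/logging/measure"
)

// fibVppHandler is a sketch of the receiver that replaces L2FibVppCalls;
// the exact definition lives outside the hunks shown here.
type fibVppHandler struct {
	log               logging.Logger
	callsChannel      govppapi.Channel    // synchronous requests (dumps)
	asyncCallsChannel govppapi.Channel    // used by l2fibAddDel below
	requestChan       chan *FibLogicalReq // consumed by WatchFIBReplies
	stopwatch         *measure.Stopwatch
}
```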
-func (fib *L2FibVppCalls) Add(mac string, bdID uint32, ifIdx uint32, bvi bool, static bool, callback func(error)) error { - fib.log.Debug("Adding L2 FIB table entry, mac: ", mac) +func (handler *fibVppHandler) Add(mac string, bdID uint32, ifIdx uint32, bvi bool, static bool, callback func(error)) error { + handler.log.Debug("Adding L2 FIB table entry, mac: ", mac) - fib.requestChan <- &FibLogicalReq{ + handler.requestChan <- &FibLogicalReq{ IsAdd: true, MAC: mac, BDIdx: bdID, @@ -81,11 +61,10 @@ func (fib *L2FibVppCalls) Add(mac string, bdID uint32, ifIdx uint32, bvi bool, s return nil } -// Delete removes existing L2 FIB table entry. -func (fib *L2FibVppCalls) Delete(mac string, bdID uint32, ifIdx uint32, callback func(error)) error { - fib.log.Debug("Removing L2 fib table entry, mac: ", mac) +func (handler *fibVppHandler) Delete(mac string, bdID uint32, ifIdx uint32, callback func(error)) error { + handler.log.Debug("Removing L2 fib table entry, mac: ", mac) - fib.requestChan <- &FibLogicalReq{ + handler.requestChan <- &FibLogicalReq{ IsAdd: false, MAC: mac, BDIdx: bdID, @@ -95,18 +74,17 @@ func (fib *L2FibVppCalls) Delete(mac string, bdID uint32, ifIdx uint32, callback return nil } -// WatchFIBReplies handles L2 FIB add/del requests -func (fib *L2FibVppCalls) WatchFIBReplies() { +func (handler *fibVppHandler) WatchFIBReplies() { for { select { - case r := <-fib.requestChan: - fib.log.Debug("VPP L2FIB request: ", r) - err := l2fibAddDel(r.MAC, r.BDIdx, r.SwIfIdx, r.BVI, r.Static, r.IsAdd, fib.vppChan, fib.stopwatch) + case r := <-handler.requestChan: + handler.log.Debug("VPP L2FIB request: ", r) + err := handler.l2fibAddDel(r.MAC, r.BDIdx, r.SwIfIdx, r.BVI, r.Static, r.IsAdd) if err != nil { - fib.log.WithFields(logging.Fields{"mac": r.MAC, "bdIdx": r.BDIdx}). + handler.log.WithFields(logging.Fields{"mac": r.MAC, "bdIdx": r.BDIdx}). Error("Static fib entry add/delete failed:", err) } else { - fib.log.WithFields(logging.Fields{"mac": r.MAC, "bdIdx": r.BDIdx}). + handler.log.WithFields(logging.Fields{"mac": r.MAC, "bdIdx": r.BDIdx}). Debug("Static fib entry added/deleted.") } r.callback(err) @@ -114,9 +92,9 @@ func (fib *L2FibVppCalls) WatchFIBReplies() { } } -func l2fibAddDel(macstr string, bdIdx, swIfIdx uint32, bvi, static, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (err error) { +func (handler *fibVppHandler) l2fibAddDel(macstr string, bdIdx, swIfIdx uint32, bvi, static, isAdd bool) (err error) { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.L2fibAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.L2fibAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) var mac []byte @@ -137,7 +115,7 @@ func l2fibAddDel(macstr string, bdIdx, swIfIdx uint32, bvi, static, isAdd bool, } reply := &l2ba.L2fibAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.asyncCallsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go index b42fde7530..3df508ce52 100644 --- a/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
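Before the test diff, it is worth spelling out the control flow established above: Add and Delete only enqueue a FibLogicalReq; WatchFIBReplies is the single goroutine that talks to VPP and reports each result through the request's callback. A minimal usage sketch, assuming a handler built via vppcalls.NewFibVppHandler as in fibTestSetup at the end of this file's diff:

```go
package example

import (
	"log"

	"github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls"
)

// addFibEntry shows the asynchronous request/callback flow of the FIB handler.
func addFibEntry(fibHandler vppcalls.FibVppAPI) error {
	go fibHandler.WatchFIBReplies() // drains requestChan, one request at a time

	errc := make(chan error, 1)
	// arguments: mac, bdID, ifIdx, bvi, static, callback
	fibHandler.Add("aa:bb:cc:dd:ee:01", 4, 10, false, true, func(err error) {
		errc <- err // invoked from the WatchFIBReplies goroutine after VPP replies
	})
	err := <-errc
	if err != nil {
		log.Println("L2 FIB add failed:", err)
	}
	return err
}
```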
-package vppcalls +package vppcalls_test import ( "log" @@ -23,6 +23,7 @@ import ( govppcore "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging/logrus" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" logrus2 "github.com/sirupsen/logrus" @@ -53,11 +54,10 @@ var deleteTestDataOutFib = &l2ba.L2fibAddDel{ } func TestL2FibAdd(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, fibHandler := fibTestSetup(t) defer ctx.TeardownTestCtx() - l2FibVppCalls := NewL2FibVppCalls(logrus.DefaultLogger(), ctx.MockChannel, nil) - go l2FibVppCalls.WatchFIBReplies() + go fibHandler.WatchFIBReplies() errc := make(chan error, len(testDataInFib)) cb := func(err error) { @@ -65,7 +65,7 @@ func TestL2FibAdd(t *testing.T) { } for i := 0; i < len(testDataInFib); i++ { ctx.MockVpp.MockReply(&l2ba.L2fibAddDelReply{}) - l2FibVppCalls.Add(testDataInFib[i].mac, testDataInFib[i].bdID, testDataInFib[i].ifIdx, + fibHandler.Add(testDataInFib[i].mac, testDataInFib[i].bdID, testDataInFib[i].ifIdx, testDataInFib[i].bvi, testDataInFib[i].static, cb) err := <-errc Expect(err).ShouldNot(HaveOccurred()) @@ -74,38 +74,36 @@ func TestL2FibAdd(t *testing.T) { } func TestL2FibAddError(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, fibHandler := fibTestSetup(t) defer ctx.TeardownTestCtx() - l2FibVppCalls := NewL2FibVppCalls(logrus.DefaultLogger(), ctx.MockChannel, nil) - go l2FibVppCalls.WatchFIBReplies() + go fibHandler.WatchFIBReplies() errc := make(chan error, len(testDataInFib)) cb := func(err error) { errc <- err } - l2FibVppCalls.Add("not:mac:addr", 4, 10, false, false, cb) + fibHandler.Add("not:mac:addr", 4, 10, false, false, cb) err := <-errc Expect(err).Should(HaveOccurred()) ctx.MockVpp.MockReply(&l2ba.L2fibAddDelReply{Retval: 1}) - l2FibVppCalls.Add("FF:FF:FF:FF:FF:FF", 4, 10, false, false, cb) + fibHandler.Add("FF:FF:FF:FF:FF:FF", 4, 10, false, false, cb) err = <-errc Expect(err).Should(HaveOccurred()) ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) - l2FibVppCalls.Add("FF:FF:FF:FF:FF:FF", 4, 10, false, false, cb) + fibHandler.Add("FF:FF:FF:FF:FF:FF", 4, 10, false, false, cb) err = <-errc Expect(err).Should(HaveOccurred()) } func TestL2FibDelete(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, fibHandler := fibTestSetup(t) defer ctx.TeardownTestCtx() - l2FibVppCalls := NewL2FibVppCalls(logrus.DefaultLogger(), ctx.MockChannel, nil) - go l2FibVppCalls.WatchFIBReplies() + go fibHandler.WatchFIBReplies() errc := make(chan error, len(testDataInFib)) cb := func(err error) { @@ -113,7 +111,7 @@ func TestL2FibDelete(t *testing.T) { } for i := 0; i < len(testDataInFib); i++ { ctx.MockVpp.MockReply(&l2ba.L2fibAddDelReply{}) - l2FibVppCalls.Delete(testDataInFib[i].mac, testDataInFib[i].bdID, testDataInFib[i].ifIdx, cb) + fibHandler.Delete(testDataInFib[i].mac, testDataInFib[i].bdID, testDataInFib[i].ifIdx, cb) err := <-errc Expect(err).ShouldNot(HaveOccurred()) Expect(ctx.MockChannel.Msg).To(Equal(deleteTestDataOutFib)) @@ -121,11 +119,10 @@ func TestL2FibDelete(t *testing.T) { } func TestWatchFIBReplies(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, fibHandler := fibTestSetup(t) defer ctx.TeardownTestCtx() - l2FibVppCalls := NewL2FibVppCalls(logrus.DefaultLogger(), ctx.MockChannel, nil) - go l2FibVppCalls.WatchFIBReplies() + go fibHandler.WatchFIBReplies() ctx.MockVpp.MockReply(&l2ba.L2fibAddDelReply{}) @@ -134,7 +131,7 @@ func 
TestWatchFIBReplies(t *testing.T) { log.Println("dummyCallback:", err) errc <- err } - l2FibVppCalls.Add("FF:FF:FF:FF:FF:FF", 4, 45, false, false, cb) + fibHandler.Add("FF:FF:FF:FF:FF:FF", 4, 45, false, false, cb) select { case err := <-errc: @@ -145,7 +142,7 @@ func TestWatchFIBReplies(t *testing.T) { } func benchmarkWatchFIBReplies(reqN int, b *testing.B) { - ctx := vppcallmock.SetupTestCtx(nil) + ctx, fibHandler := fibTestSetup(nil) defer ctx.TeardownTestCtx() // debug logs slow down benchmarks @@ -154,8 +151,7 @@ func benchmarkWatchFIBReplies(reqN int, b *testing.B) { govpplogger.Level = logrus2.WarnLevel govppcore.SetLogger(govpplogger) - l2FibVppCalls := NewL2FibVppCalls(logrus.DefaultLogger(), ctx.MockChannel, nil) - go l2FibVppCalls.WatchFIBReplies() + go fibHandler.WatchFIBReplies() errc := make(chan error, reqN) cb := func(err error) { @@ -165,7 +161,7 @@ func benchmarkWatchFIBReplies(reqN int, b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < reqN; i++ { ctx.MockVpp.MockReply(&l2ba.L2fibAddDelReply{}) - l2FibVppCalls.Add("FF:FF:FF:FF:FF:FF", 4, 45, false, false, cb) + fibHandler.Add("FF:FF:FF:FF:FF:FF", 4, 45, false, false, cb) } count := 0 @@ -190,3 +186,12 @@ func BenchmarkWatchFIBReplies1(b *testing.B) { benchmarkWatchFIBReplies(1, b) func BenchmarkWatchFIBReplies10(b *testing.B) { benchmarkWatchFIBReplies(10, b) } func BenchmarkWatchFIBReplies100(b *testing.B) { benchmarkWatchFIBReplies(100, b) } func BenchmarkWatchFIBReplies1000(b *testing.B) { benchmarkWatchFIBReplies(1000, b) } + +func fibTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.FibVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + logger := logrus.NewLogger("test-log") + requestChan := make(chan *vppcalls.FibLogicalReq) + fibHandler, err := vppcalls.NewFibVppHandler(ctx.MockChannel, ctx.MockChannel, requestChan, logger, nil) + Expect(err).To(BeNil()) + return ctx, fibHandler +} diff --git a/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls.go index b1066ff973..8f8c6f49fd 100644 --- a/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls.go @@ -19,7 +19,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" ) @@ -31,19 +30,17 @@ var XConnectMessages = []govppapi.Message{ &l2ba.SwInterfaceSetL2XconnectReply{}, } -// AddL2XConnect creates xConnect between two existing interfaces. -func AddL2XConnect(rxIfIdx uint32, txIfIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return addDelXConnect(rxIfIdx, txIfIdx, true, vppChan, stopwatch) +func (handler *xConnectVppHandler) AddL2XConnect(rxIfIdx uint32, txIfIdx uint32) error { + return handler.addDelXConnect(rxIfIdx, txIfIdx, true) } -// DeleteL2XConnect removes xConnect between two interfaces. 
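Both exported xConnect methods route through addDelXConnect, which uses the send/receive/Retval idiom common to all these handlers: a transport error and a non-zero Retval are distinct failure modes and both must be checked. A condensed, self-contained sketch of the idiom (the reply type is the one this file already uses; the helper name is illustrative and not part of the patch):

```go
package example

import (
	"fmt"

	govppapi "git.fd.io/govpp.git/api"
	l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2"
)

// sendAndCheck is an illustrative helper showing the shared request idiom.
func sendAndCheck(ch govppapi.Channel, req govppapi.Message) error {
	reply := &l2ba.SwInterfaceSetL2XconnectReply{}
	if err := ch.SendRequest(req).ReceiveReply(reply); err != nil {
		return err // transport or decoding failure
	}
	if reply.Retval != 0 {
		// VPP delivered the message but rejected the operation
		return fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval)
	}
	return nil
}
```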
-func DeleteL2XConnect(rxIfIdx uint32, txIfIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return addDelXConnect(rxIfIdx, txIfIdx, false, vppChan, stopwatch) +func (handler *xConnectVppHandler) DeleteL2XConnect(rxIfIdx uint32, txIfIdx uint32) error { + return handler.addDelXConnect(rxIfIdx, txIfIdx, false) } -func addDelXConnect(rxIfaceIdx uint32, txIfaceIdx uint32, enable bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *xConnectVppHandler) addDelXConnect(rxIfaceIdx uint32, txIfaceIdx uint32, enable bool) error { defer func(t time.Time) { - stopwatch.TimeLog(l2ba.SwInterfaceSetL2Xconnect{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l2ba.SwInterfaceSetL2Xconnect{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &l2ba.SwInterfaceSetL2Xconnect{ @@ -53,7 +50,7 @@ func addDelXConnect(rxIfaceIdx uint32, txIfaceIdx uint32, enable bool, vppChan g } reply := &l2ba.SwInterfaceSetL2XconnectReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { diff --git a/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go index f5cd284fa5..691a11de6d 100644 --- a/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go @@ -12,13 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppcalls +package vppcalls_test import ( "testing" govppapi "git.fd.io/govpp.git/api" + "github.com/ligato/cn-infra/logging/logrus" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" ) @@ -51,13 +53,13 @@ scenarios: */ // TestVppSetL2XConnect tests VppSetL2XConnect method func TestVppSetL2XConnect(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, xcHandler := xcTestSetup(t) defer ctx.TeardownTestCtx() for i := 0; i < len(setTestDataInXConnect); i++ { ctx.MockVpp.MockReply(setTestDataInXConnect[i].message) - err := AddL2XConnect(setTestDataInXConnect[i].receiveIfaceIndex, - setTestDataInXConnect[i].transmitIfaceIndex, ctx.MockChannel, nil) + err := xcHandler.AddL2XConnect(setTestDataInXConnect[i].receiveIfaceIndex, + setTestDataInXConnect[i].transmitIfaceIndex) if setTestDataOutXConnect[i].isResultOk { Expect(err).To(BeNil()) @@ -96,13 +98,13 @@ scenarios: */ // TestVppUnsetL2XConnect tests VppUnsetL2XConnect method func TestVppUnsetL2XConnect(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, xcHandler := xcTestSetup(t) defer ctx.TeardownTestCtx() for i := 0; i < len(unsetTestDataInXConnect); i++ { ctx.MockVpp.MockReply(unsetTestDataInXConnect[i].message) - err := DeleteL2XConnect(unsetTestDataInXConnect[i].receiveIfaceIndex, - unsetTestDataInXConnect[i].transmitIfaceIndex, ctx.MockChannel, nil) + err := xcHandler.DeleteL2XConnect(unsetTestDataInXConnect[i].receiveIfaceIndex, + unsetTestDataInXConnect[i].transmitIfaceIndex) if unsetTestDataOutXConnect[i].isResultOk { Expect(err).To(BeNil()) @@ -112,3 +114,11 @@ func TestVppUnsetL2XConnect(t *testing.T) { Expect(ctx.MockChannel.Msg).To(Equal(unsetTestDataOutXConnect[i].outData)) } } + +func xcTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.XConnectVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + xcHandler, err := vppcalls.NewXConnectVppHandler(ctx.MockChannel, log, nil) + Expect(err).To(BeNil()) + return ctx, xcHandler +} diff --git a/plugins/vpp/l2plugin/vppdump/doc.go b/plugins/vpp/l2plugin/vppdump/doc.go deleted file mode 100644 index 93494dde3d..0000000000 --- a/plugins/vpp/l2plugin/vppdump/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package vppdump provides helpers for dumping all bridge-domains, L2 FIBs and -// XConnect pairs configured in VPP. 
-package vppdump diff --git a/plugins/vpp/l2plugin/xconnect_config.go b/plugins/vpp/l2plugin/xconnect_config.go index cef26597df..f46150bccf 100644 --- a/plugins/vpp/l2plugin/xconnect_config.go +++ b/plugins/vpp/l2plugin/xconnect_config.go @@ -41,6 +41,7 @@ type XConnectConfigurator struct { xcIndexSeq uint32 vppChan govppapi.Channel + xcHandler vppcalls.XConnectVppAPI stopwatch *measure.Stopwatch // Timer used to measure and store time } @@ -51,6 +52,11 @@ func (plugin *XConnectConfigurator) Init(logger logging.PluginLogger, goVppMux g plugin.log = logger.NewLogger("-xc-conf") plugin.log.Info("Initializing L2 xConnect configurator") + // Stopwatch + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("XCConfigurator", plugin.log) + } + // Mappings plugin.ifIndexes = swIfIndexes plugin.xcIndexes = l2idx.NewXcIndex(nametoidx.NewNameToIdx(plugin.log, "xc-indexes", nil)) @@ -64,9 +70,9 @@ func (plugin *XConnectConfigurator) Init(logger logging.PluginLogger, goVppMux g return err } - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("BFDConfigurator", plugin.log) + // Cross-connect VPP API handler + if plugin.xcHandler, err = vppcalls.NewXConnectVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + return err } // Message compatibility @@ -125,7 +131,7 @@ func (plugin *XConnectConfigurator) ConfigureXConnectPair(xc *l2.XConnectPairs_X return nil } // XConnect can be configured now - if err := vppcalls.AddL2XConnect(rxIfIdx, txIfIdx, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.xcHandler.AddL2XConnect(rxIfIdx, txIfIdx); err != nil { plugin.log.Errorf("Adding l2xConnect failed: %v", err) return err } @@ -166,7 +172,7 @@ func (plugin *XConnectConfigurator) ModifyXConnectPair(newXc, oldXc *l2.XConnect return nil // Nothing more can be done } plugin.log.Debugf("Removing obsolete l2xConnect %s-%s", oldXc.ReceiveInterface, oldXc.TransmitInterface) - if err := vppcalls.DeleteL2XConnect(rxIfIdx, oldTxIfIdx, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.xcHandler.DeleteL2XConnect(rxIfIdx, oldTxIfIdx); err != nil { plugin.log.Errorf("Deleted obsolete l2xConnect failed: %v", err) return err } @@ -174,7 +180,7 @@ func (plugin *XConnectConfigurator) ModifyXConnectPair(newXc, oldXc *l2.XConnect return nil } // Replace existing entry - if err := vppcalls.AddL2XConnect(rxIfIdx, txIfIdx, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.xcHandler.AddL2XConnect(rxIfIdx, txIfIdx); err != nil { plugin.log.Errorf("Replacing l2xConnect failed: %v", err) return err } @@ -214,7 +220,7 @@ func (plugin *XConnectConfigurator) DeleteXConnectPair(xc *l2.XConnectPairs_XCon return nil } // XConnect can be removed now - if err := vppcalls.DeleteL2XConnect(rxIfIdx, txIfIdx, plugin.vppChan, plugin.stopwatch); err != nil { + if err := plugin.xcHandler.DeleteL2XConnect(rxIfIdx, txIfIdx); err != nil { plugin.log.Errorf("Removing l2xConnect failed: %v", err) return err } diff --git a/plugins/vpp/plugin_impl_vpp.go b/plugins/vpp/plugin_impl_vpp.go index 22cfdd1057..3ffbe86937 100644 --- a/plugins/vpp/plugin_impl_vpp.go +++ b/plugins/vpp/plugin_impl_vpp.go @@ -527,8 +527,7 @@ func (plugin *Plugin) initL2(ctx context.Context) error { // L2 FIB configurator plugin.fibConfigurator = &l2plugin.FIBConfigurator{} - err = plugin.fibConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.bdIndexes, plugin.enableStopwatch) - if err != nil { + if err := plugin.fibConfigurator.Init(plugin.Log, plugin.GoVppmux, 
plugin.swIfIndexes, plugin.bdIndexes, plugin.enableStopwatch); err != nil { return err } plugin.Log.Debug("fibConfigurator Initialized") From d8c21a967ba7b7404c1734979aec808094e51e8a Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Tue, 10 Jul 2018 08:38:05 +0200 Subject: [PATCH 036/174] l3plugin vppcalls api Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 18 ++- plugins/vpp/l3plugin/arp_config.go | 17 +- plugins/vpp/l3plugin/arp_proxy_config.go | 31 ++-- plugins/vpp/l3plugin/data_resync.go | 6 +- plugins/vpp/l3plugin/route_config.go | 26 ++- plugins/vpp/l3plugin/vppcalls/api_vppcalls.go | 148 ++++++++++++++++++ plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go | 17 +- .../l3plugin/vppcalls/arp_vppcalls_test.go | 26 ++- plugins/vpp/l3plugin/vppcalls/doc.go | 2 +- .../{vppdump => vppcalls}/dump_vppcalls.go | 71 ++++----- .../dump_vppcalls_test.go | 10 +- .../l3plugin/vppcalls/proxyarp_vppcalls.go | 38 ++--- .../vppcalls/proxyarp_vppcalls_test.go | 29 ++-- .../vpp/l3plugin/vppcalls/route_vppcalls.go | 22 +-- .../l3plugin/vppcalls/route_vppcalls_test.go | 31 ++-- plugins/vpp/l3plugin/vppdump/doc.go | 2 - 16 files changed, 341 insertions(+), 153 deletions(-) create mode 100644 plugins/vpp/l3plugin/vppcalls/api_vppcalls.go rename plugins/vpp/l3plugin/{vppdump => vppcalls}/dump_vppcalls.go (62%) rename plugins/vpp/l3plugin/{vppdump => vppcalls}/dump_vppcalls_test.go (89%) delete mode 100644 plugins/vpp/l3plugin/vppdump/doc.go diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 8fc520aaa6..eb75cf5168 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -32,7 +32,7 @@ import ( aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppdump" - l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppdump" + l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) @@ -191,7 +191,13 @@ func (plugin *Plugin) arpGetHandler(formatter *render.Render) http.HandlerFunc { } defer ch.Close() - res, err := l3plugin.DumpArps(plugin.Log, ch, nil) + l3Handler, err := l3plugin.NewArpVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := l3Handler.DumpArpEntries() if err != nil { plugin.Log.Errorf("Error: %v", err) formatter.JSON(w, http.StatusInternalServerError, nil) @@ -218,7 +224,13 @@ func (plugin *Plugin) staticRoutesGetHandler(formatter *render.Render) http.Hand } defer ch.Close() - res, err := l3plugin.DumpStaticRoutes(plugin.Log, ch, nil) + l3Handler, err := l3plugin.NewRouteVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := l3Handler.DumpStaticRoutes() if err != nil { plugin.Log.Errorf("Error: %v", err) formatter.JSON(w, http.StatusInternalServerError, nil) diff --git a/plugins/vpp/l3plugin/arp_config.go b/plugins/vpp/l3plugin/arp_config.go index 4b5b5346fe..86a6bcd679 100644 --- a/plugins/vpp/l3plugin/arp_config.go +++ b/plugins/vpp/l3plugin/arp_config.go @@ -52,6 +52,8 @@ type ArpConfigurator struct { // VPP channel vppChan govppapi.Channel + // VPP API handler + arpHandler vppcalls.ArpVppAPI // Timer used to measure and store time stopwatch 
*measure.Stopwatch
@@ -64,6 +66,11 @@ func (plugin *ArpConfigurator) Init(logger logging.PluginLogger, goVppMux govppm
 	plugin.log = logger.NewLogger("-l3-arp-conf")
 	plugin.log.Debug("Initializing ARP configurator")
 
+	// Configurator-wide stopwatch instance
+	if enableStopwatch {
+		plugin.stopwatch = measure.NewStopwatch("ARP-configurator", plugin.log)
+	}
+
 	// Mappings
 	plugin.ifIndexes = swIfIndexes
 	plugin.arpIndexes = l3idx.NewARPIndex(nametoidx.NewNameToIdx(plugin.log, "arp_indexes", nil))
@@ -77,9 +84,9 @@ func (plugin *ArpConfigurator) Init(logger logging.PluginLogger, goVppMux govppm
 		return err
 	}
 
-	// Stopwatch
-	if enableStopwatch {
-		plugin.stopwatch = measure.NewStopwatch("ARPConfigurator", plugin.log)
+	// VPP API handler
+	if plugin.arpHandler, err = vppcalls.NewArpVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil {
+		return err
 	}
 
 	// Message compatibility
@@ -160,7 +167,7 @@ func (plugin *ArpConfigurator) AddArp(entry *l3.ArpTable_ArpEntry) error {
 	plugin.log.Debugf("adding ARP: %+v", *arp)
 
 	// Create and register new arp entry
-	if err = vppcalls.VppAddArp(arp, plugin.vppChan, plugin.stopwatch); err != nil {
+	if err = plugin.arpHandler.VppAddArp(arp); err != nil {
 		return err
 	}
 
@@ -235,7 +242,7 @@ func (plugin *ArpConfigurator) DeleteArp(entry *l3.ArpTable_ArpEntry) error {
 	plugin.log.Debugf("deleting ARP: %+v", arp)
 
 	// Delete and un-register new arp
-	if err = vppcalls.VppDelArp(arp, plugin.vppChan, plugin.stopwatch); err != nil {
+	if err = plugin.arpHandler.VppDelArp(arp); err != nil {
 		return err
 	}
 	_, _, found = plugin.arpIndexes.UnregisterName(arpID)
diff --git a/plugins/vpp/l3plugin/arp_proxy_config.go b/plugins/vpp/l3plugin/arp_proxy_config.go
index 19261182cb..071af875d1 100644
--- a/plugins/vpp/l3plugin/arp_proxy_config.go
+++ b/plugins/vpp/l3plugin/arp_proxy_config.go
@@ -51,6 +51,8 @@ type ProxyArpConfigurator struct {
 
 	// VPP channel
 	vppChan govppapi.Channel
+	// VPP API handler
+	pArpHandler vppcalls.ProxyArpVppAPI
 
 	// Timer used to measure and store time
 	stopwatch *measure.Stopwatch
@@ -63,6 +65,11 @@ func (plugin *ProxyArpConfigurator) Init(logger logging.PluginLogger, goVppMux g
 	plugin.log = logger.NewLogger("-l3-proxy-arp-conf")
 	plugin.log.Debugf("Initializing proxy ARP configurator")
 
+	// Configurator-wide stopwatch instance
+	if enableStopwatch {
+		plugin.stopwatch = measure.NewStopwatch("ARP-proxy-configurator", plugin.log)
+	}
+
 	// Mappings
 	plugin.ifIndexes = swIfIndexes
 	plugin.pArpIfIndexes = nametoidx.NewNameToIdx(plugin.log, "proxyarp_if_indices", nil)
@@ -75,9 +82,9 @@ func (plugin *ProxyArpConfigurator) Init(logger logging.PluginLogger, goVppMux g
 		return err
 	}
 
-	// Stopwatch
-	if enableStopwatch {
-		plugin.stopwatch = measure.NewStopwatch("ProxyARPConfigurator", plugin.log)
+	// VPP API handler
+	if plugin.pArpHandler, err = vppcalls.NewProxyArpVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil {
+		return err
 	}
 
 	// Message compatibility
@@ -134,7 +141,7 @@ func (plugin *ProxyArpConfigurator) AddInterface(pArpIf *l3.ProxyArpInterfaces_I
 	}
 
 	// Call VPP API to enable interface for proxy ARP
-	if err := vppcalls.EnableProxyArpInterface(ifIdx, plugin.vppChan, plugin.log, plugin.stopwatch); err == nil {
+	if err := plugin.pArpHandler.EnableProxyArpInterface(ifIdx); err == nil {
 		plugin.log.Debugf("Interface %s enabled for proxy ARP", ifName)
 	} else {
 		err := fmt.Errorf("enabling interface %s for proxy ARP failed: %v", ifName, err)
@@ -168,7 +175,7 @@ func (plugin *ProxyArpConfigurator) ModifyInterface(newPArpIf, oldPArpIf *l3.Pro
ifIdx, _, found := plugin.ifIndexes.LookupIdx(ifName) // If interface is not found, there is nothing to do if found { - if err := vppcalls.DisableProxyArpInterface(ifIdx, plugin.vppChan, plugin.log, plugin.stopwatch); err == nil { + if err := plugin.pArpHandler.DisableProxyArpInterface(ifIdx); err == nil { plugin.log.Debugf("Interface %s disabled for proxy ARP", ifName) } else { err = fmt.Errorf("disabling interface %s for proxy ARP failed: %v", ifName, err) @@ -187,7 +194,7 @@ func (plugin *ProxyArpConfigurator) ModifyInterface(newPArpIf, oldPArpIf *l3.Pro continue } // Configure - if err := vppcalls.EnableProxyArpInterface(ifIdx, plugin.vppChan, plugin.log, plugin.stopwatch); err == nil { + if err := plugin.pArpHandler.EnableProxyArpInterface(ifIdx); err == nil { plugin.log.Debugf("Interface %s enabled for proxy ARP", ifName) } else { err := fmt.Errorf("enabling interface %s for proxy ARP failed: %v", ifName, err) @@ -222,7 +229,7 @@ ProxyArpIfLoop: continue } // Call VPP API to disable interface for proxy ARP - if err := vppcalls.DisableProxyArpInterface(ifIdx, plugin.vppChan, plugin.log, plugin.stopwatch); err == nil { + if err := plugin.pArpHandler.DisableProxyArpInterface(ifIdx); err == nil { plugin.log.Debugf("Interface %s disabled for proxy ARP", ifName) } else { err = fmt.Errorf("disabling interface %s for proxy ARP failed: %v", ifName, err) @@ -257,7 +264,7 @@ func (plugin *ProxyArpConfigurator) AddRange(pArpRng *l3.ProxyArpRanges_RangeLis bFirstIP := net.ParseIP(firstIP).To4() bLastIP := net.ParseIP(lastIP).To4() // Call VPP API to configure IP range for proxy ARP - if err := vppcalls.AddProxyArpRange(bFirstIP, bLastIP, plugin.vppChan, plugin.log, plugin.stopwatch); err == nil { + if err := plugin.pArpHandler.AddProxyArpRange(bFirstIP, bLastIP); err == nil { plugin.log.Debugf("Address range %s - %s configured for proxy ARP", firstIP, lastIP) } else { err := fmt.Errorf("failed to configure proxy ARP address range %s - %s: %v", firstIP, lastIP, err) @@ -295,7 +302,7 @@ func (plugin *ProxyArpConfigurator) ModifyRange(newPArpRng, oldPArpRng *l3.Proxy bFirstIP := net.ParseIP(firstIP).To4() bLastIP := net.ParseIP(lastIP).To4() // Call VPP API to configure IP range for proxy ARP - if err := vppcalls.DeleteProxyArpRange(bFirstIP, bLastIP, plugin.vppChan, plugin.log, plugin.stopwatch); err == nil { + if err := plugin.pArpHandler.DeleteProxyArpRange(bFirstIP, bLastIP); err == nil { plugin.log.Debugf("Address range %s - %s removed from proxy ARP setup", firstIP, lastIP) } else { err = fmt.Errorf("failed to remove proxy ARP address range %s - %s: %v", firstIP, lastIP, err) @@ -320,7 +327,7 @@ func (plugin *ProxyArpConfigurator) ModifyRange(newPArpRng, oldPArpRng *l3.Proxy bFirstIP := net.ParseIP(firstIP).To4() bLastIP := net.ParseIP(lastIP).To4() // Call VPP API to configure IP range for proxy ARP - if err := vppcalls.AddProxyArpRange(bFirstIP, bLastIP, plugin.vppChan, plugin.log, plugin.stopwatch); err == nil { + if err := plugin.pArpHandler.AddProxyArpRange(bFirstIP, bLastIP); err == nil { plugin.log.Debugf("Address range %s - %s configured for proxy ARP", firstIP, lastIP) } else { err := fmt.Errorf("failed to configure proxy ARP address range %s - %s: %v", firstIP, lastIP, err) @@ -352,7 +359,7 @@ func (plugin *ProxyArpConfigurator) DeleteRange(pArpRng *l3.ProxyArpRanges_Range bFirstIP := net.ParseIP(firstIP).To4() bLastIP := net.ParseIP(lastIP).To4() // Call VPP API to configure IP range for proxy ARP - if err := vppcalls.DeleteProxyArpRange(bFirstIP, bLastIP, plugin.vppChan, 
plugin.log, plugin.stopwatch); err == nil { + if err := plugin.pArpHandler.DeleteProxyArpRange(bFirstIP, bLastIP); err == nil { plugin.log.Debugf("Address range %s - %s removed from proxy ARP setup", firstIP, lastIP) } else { err = fmt.Errorf("failed to remove proxy ARP address range %s - %s: %v", firstIP, lastIP, err) @@ -375,7 +382,7 @@ func (plugin *ProxyArpConfigurator) ResolveCreatedInterface(ifName string, ifIdx for idx, cachedIf := range plugin.pArpIfCache { if cachedIf == ifName { // Configure cached interface - if err := vppcalls.EnableProxyArpInterface(ifIdx, plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + if err := plugin.pArpHandler.EnableProxyArpInterface(ifIdx); err != nil { plugin.log.Error(err) return err } diff --git a/plugins/vpp/l3plugin/data_resync.go b/plugins/vpp/l3plugin/data_resync.go index 60393e2a86..c01426aa9a 100644 --- a/plugins/vpp/l3plugin/data_resync.go +++ b/plugins/vpp/l3plugin/data_resync.go @@ -16,10 +16,8 @@ package l3plugin import ( "fmt" - "github.com/ligato/cn-infra/logging/measure" - l3ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" + "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppdump" "github.com/ligato/vpp-agent/plugins/vpp/model/l3" ) @@ -37,7 +35,7 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error plugin.clearMapping() // Retrieve VPP route configuration - vppRoutes, err := vppdump.DumpStaticRoutes(plugin.log, plugin.vppChan, measure.GetTimeLog(l3ba.IPFibDump{}, plugin.stopwatch)) + vppRoutes, err := plugin.rtHandler.DumpStaticRoutes() if err != nil { return err } diff --git a/plugins/vpp/l3plugin/route_config.go b/plugins/vpp/l3plugin/route_config.go index cdc42821d1..a4bb3c7ef3 100644 --- a/plugins/vpp/l3plugin/route_config.go +++ b/plugins/vpp/l3plugin/route_config.go @@ -31,6 +31,7 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/l3idx" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/l3" + ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ) // RouteConfigurator runs in the background in its own goroutine where it watches for any changes @@ -48,6 +49,9 @@ type RouteConfigurator struct { // VPP channels vppChan govppapi.Channel + // VPP API handlers + ifHandler ifvppcalls.IfVppWrite + rtHandler vppcalls.RouteVppAPI // Timer used to measure and store time stopwatch *measure.Stopwatch @@ -60,6 +64,11 @@ func (plugin *RouteConfigurator) Init(logger logging.PluginLogger, goVppMux govp plugin.log = logger.NewLogger("-l3-route-conf") plugin.log.Debug("Initializing L3 Route configurator") + // Configurator-wide stopwatch instance + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("Route-configurator", plugin.log) + } + // Mappings plugin.ifIndexes = swIfIndexes plugin.rtIndexes = l3idx.NewRouteIndex(nametoidx.NewNameToIdx(plugin.log, "route_indexes", nil)) @@ -72,9 +81,12 @@ func (plugin *RouteConfigurator) Init(logger logging.PluginLogger, goVppMux govp return err } - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("RouteConfigurator", plugin.log) + // VPP API handlers + if plugin.ifHandler, err = ifvppcalls.NewIfVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + return err + } + if plugin.rtHandler, err = vppcalls.NewRouteVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + return err } // Message compatibility @@ -141,7 +153,7 @@ func (plugin 
*RouteConfigurator) ConfigureRoute(config *l3.StaticRoutes_Route, v
 
 	// Create and register new route.
 	if route != nil {
-		err := vppcalls.VppAddRoute(route, plugin.vppChan, plugin.stopwatch)
+		err := plugin.rtHandler.VppAddRoute(plugin.ifHandler, route)
 		if err != nil {
 			return err
 		}
@@ -219,7 +231,7 @@ func (plugin *RouteConfigurator) deleteOldRoute(oldConfig *l3.StaticRoutes_Route
 		return err
 	}
 	// Remove and unregister old route.
-	if err := vppcalls.VppDelRoute(oldRoute, plugin.vppChan, plugin.stopwatch); err != nil {
+	if err := plugin.rtHandler.VppDelRoute(oldRoute); err != nil {
 		return err
 	}
 
@@ -252,7 +264,7 @@ func (plugin *RouteConfigurator) addNewRoute(newConfig *l3.StaticRoutes_Route, v
 		return err
 	}
 	// Create and register new route.
-	if err = vppcalls.VppAddRoute(newRoute, plugin.vppChan, plugin.stopwatch); err != nil {
+	if err = plugin.rtHandler.VppAddRoute(plugin.ifHandler, newRoute); err != nil {
 		return err
 	}
 
@@ -299,7 +311,7 @@ func (plugin *RouteConfigurator) DeleteRoute(config *l3.StaticRoutes_Route, vrfF
 	plugin.log.Debugf("deleting route: %+v", route)
 
 	// Remove and unregister route.
-	if err = vppcalls.VppDelRoute(route, plugin.vppChan, plugin.stopwatch); err != nil {
+	if err = plugin.rtHandler.VppDelRoute(route); err != nil {
 		return err
 	}
 
diff --git a/plugins/vpp/l3plugin/vppcalls/api_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/api_vppcalls.go
new file mode 100644
index 0000000000..da20975814
--- /dev/null
+++ b/plugins/vpp/l3plugin/vppcalls/api_vppcalls.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vppcalls
+
+import (
+	govppapi "git.fd.io/govpp.git/api"
+	"github.com/ligato/cn-infra/logging"
+	"github.com/ligato/cn-infra/logging/measure"
+	"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls"
+)
+
+// ArpVppAPI provides methods for managing ARP entries
+type ArpVppAPI interface {
+	ArpVppWrite
+	ArpVppRead
+}
+
+// ArpVppWrite provides write methods for ARPs
+type ArpVppWrite interface {
+	// VppAddArp adds ARP entry according to provided input
+	VppAddArp(entry *ArpEntry) error
+	// VppDelArp removes old ARP entry according to provided input
+	VppDelArp(entry *ArpEntry) error
+}
+
+// ArpVppRead provides read methods for ARPs
+type ArpVppRead interface {
+	// DumpArpEntries dumps ARP entries from VPP and returns them as a list.
+	DumpArpEntries() ([]*ArpEntry, error)
+}
+
+// ProxyArpVppAPI provides methods for managing proxy ARP entries
+type ProxyArpVppAPI interface {
+	ProxyArpVppWrite
+	ProxyArpVppRead
+}
+
+// ProxyArpVppWrite provides write methods for proxy ARPs
+type ProxyArpVppWrite interface {
+	// EnableProxyArpInterface enables interface for proxy ARP
+	EnableProxyArpInterface(swIfIdx uint32) error
+	// DisableProxyArpInterface disables interface for proxy ARP
+	DisableProxyArpInterface(swIfIdx uint32) error
+	// AddProxyArpRange adds new IP range for proxy ARP
+	AddProxyArpRange(firstIP, lastIP []byte) error
+	// DeleteProxyArpRange removes proxy ARP IP range
+	DeleteProxyArpRange(firstIP, lastIP []byte) error
+}
+
+// ProxyArpVppRead provides read methods for proxy ARPs
+type ProxyArpVppRead interface {
+	// TODO define
+}
+
+// RouteVppAPI provides methods for managing routes
+type RouteVppAPI interface {
+	RouteVppWrite
+	RouteVppRead
+}
+
+// RouteVppWrite provides write methods for routes
+type RouteVppWrite interface {
+	// VppAddRoute adds new route, according to provided input. Every route has to contain VRF ID (default is 0).
+	VppAddRoute(ifHandler vppcalls.IfVppWrite, route *Route) error
+	// VppDelRoute removes old route, according to provided input. Every route has to contain VRF ID (default is 0).
+	VppDelRoute(route *Route) error
+}
+
+// RouteVppRead provides read methods for routes
+type RouteVppRead interface {
+	// DumpStaticRoutes dumps L3 routes from VPP and returns them as a list.
+	DumpStaticRoutes() ([]*Route, error)
+}
+
+// arpVppHandler is accessor for ARP-related vppcalls methods
+type arpVppHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel govppapi.Channel
+	log          logging.Logger
+}
+
+// proxyArpVppHandler is accessor for proxy ARP-related vppcalls methods
+type proxyArpVppHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel govppapi.Channel
+	log          logging.Logger
+}
+
+// routeHandler is accessor for route-related vppcalls methods
+type routeHandler struct {
+	stopwatch    *measure.Stopwatch
+	callsChannel govppapi.Channel
+	log          logging.Logger
+}
+
+// NewArpVppHandler creates new instance of ARP vppcalls handler
+func NewArpVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*arpVppHandler, error) {
+	handler := &arpVppHandler{
+		callsChannel: callsChan,
+		stopwatch:    stopwatch,
+		log:          log,
+	}
+	if err := handler.callsChannel.CheckMessageCompatibility(ArpMessages...); err != nil {
+		return nil, err
+	}
+
+	return handler, nil
+}
+
+// NewProxyArpVppHandler creates new instance of proxy ARP vppcalls handler
+func NewProxyArpVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*proxyArpVppHandler, error) {
+	handler := &proxyArpVppHandler{
+		callsChannel: callsChan,
+		stopwatch:    stopwatch,
+		log:          log,
+	}
+	if err := handler.callsChannel.CheckMessageCompatibility(ProxyArpMessages...); err != nil {
+		return nil, err
+	}
+
+	return handler, nil
+}
+
+// NewRouteVppHandler creates new instance of route vppcalls handler
+func NewRouteVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*routeHandler, error) {
+	handler := &routeHandler{
+		callsChannel: callsChan,
+		stopwatch:    stopwatch,
+		log:          log,
+	}
+	if err := handler.callsChannel.CheckMessageCompatibility(RouteMessages...); err != nil {
+		return nil, err
+	}
+
+	return handler, nil
+}
diff --git a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go
b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go index e8c3c5abd2..8d891d248f 100644 --- a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls.go @@ -20,7 +20,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" ) @@ -40,9 +39,9 @@ type ArpEntry struct { } // vppAddDelArp adds or removes ARP entry according to provided input -func vppAddDelArp(entry *ArpEntry, vppChan govppapi.Channel, delete bool, stopwatch *measure.Stopwatch) error { +func (handler *arpVppHandler) vppAddDelArp(entry *ArpEntry, delete bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ip.IPNeighborAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ip.IPNeighborAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ip.IPNeighborAddDel{} @@ -78,7 +77,7 @@ func vppAddDelArp(entry *ArpEntry, vppChan govppapi.Channel, delete bool, stopwa // Send message reply := &ip.IPNeighborAddDelReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -88,12 +87,10 @@ func vppAddDelArp(entry *ArpEntry, vppChan govppapi.Channel, delete bool, stopwa return nil } -// VppAddArp adds ARP entry according to provided input -func VppAddArp(entry *ArpEntry, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return vppAddDelArp(entry, vppChan, false, stopwatch) +func (handler *arpVppHandler) VppAddArp(entry *ArpEntry) error { + return handler.vppAddDelArp(entry, false) } -// VppDelArp removes old ARP entry according to provided input -func VppDelArp(entry *ArpEntry, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return vppAddDelArp(entry, vppChan, true, stopwatch) +func (handler *arpVppHandler) VppDelArp(entry *ArpEntry) error { + return handler.vppAddDelArp(entry, true) } diff --git a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go index b12b63a8cd..5f7e635443 100644 --- a/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go +++ b/plugins/vpp/l3plugin/vppcalls/arp_vppcalls_test.go @@ -15,11 +15,13 @@ package vppcalls_test import ( + "testing" + + "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" - "testing" ) var arpEntries = []vppcalls.ArpEntry{ @@ -45,30 +47,38 @@ var arpEntries = []vppcalls.ArpEntry{ // Test adding of ARP func TestAddArp(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, arpHandler := arpTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPNeighborAddDelReply{}) - err := vppcalls.VppAddArp(&arpEntries[0], ctx.MockChannel, nil) + err := arpHandler.VppAddArp(&arpEntries[0]) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.IPNeighborAddDelReply{}) - err = vppcalls.VppAddArp(&arpEntries[1], ctx.MockChannel, nil) + err = arpHandler.VppAddArp(&arpEntries[1]) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.IPNeighborAddDelReply{}) - err = vppcalls.VppAddArp(&arpEntries[2], ctx.MockChannel, nil) + err = arpHandler.VppAddArp(&arpEntries[2]) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.IPNeighborAddDelReply{Retval: 1}) - err = vppcalls.VppAddArp(&arpEntries[0], ctx.MockChannel, nil) + err = arpHandler.VppAddArp(&arpEntries[0]) Expect(err).To(Not(BeNil())) } // Test deleting of ARP func TestDelArp(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, arpHandler := arpTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPNeighborAddDelReply{}) - err := vppcalls.VppDelArp(&arpEntries[0], ctx.MockChannel, nil) + err := arpHandler.VppDelArp(&arpEntries[0]) Expect(err).To(Succeed()) } + +func arpTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.ArpVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + arpHandler, err := vppcalls.NewArpVppHandler(ctx.MockChannel, log, nil) + Expect(err).To(BeNil()) + return ctx, arpHandler +} diff --git a/plugins/vpp/l3plugin/vppcalls/doc.go b/plugins/vpp/l3plugin/vppcalls/doc.go index 5d38aca191..8ff27a465a 100644 --- a/plugins/vpp/l3plugin/vppcalls/doc.go +++ b/plugins/vpp/l3plugin/vppcalls/doc.go @@ -1,2 +1,2 @@ -// Package vppcalls contains wrappers over VPP binary APIs for L3 FIBs. +// Package vppcalls contains wrappers over VPP binary APIs for ARPs, proxy ARPs, L3 FIBs and helpers for dumping them. package vppcalls diff --git a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go similarity index 62% rename from plugins/vpp/l3plugin/vppdump/dump_vppcalls.go rename to plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go index ecd7f94df6..b52f787d69 100644 --- a/plugins/vpp/l3plugin/vppdump/dump_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package vppdump +package vppcalls import ( "bytes" @@ -21,28 +21,21 @@ import ( "time" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" + "git.fd.io/govpp.git/examples/bin_api/ip" "github.com/ligato/cn-infra/utils/addrs" l3ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" - "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" ) -// DumpStaticRoutes dumps l3 routes from VPP and fills them into the provided static route map. 
-func DumpStaticRoutes(log logging.Logger, vppChan govppapi.Channel, timeLog measure.StopWatchEntry) ([]*vppcalls.Route, error) { +func (handler *routeHandler) DumpStaticRoutes() ([]*Route, error) { // IPFibDump time measurement - start := time.Now() - defer func() { - if timeLog != nil { - timeLog.LogTimeEntry(time.Since(start)) - } - }() + defer func(t time.Time) { + handler.stopwatch.TimeLog(ip.IPFibDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) - var routes []*vppcalls.Route + var routes []*Route // Dump IPv4 l3 FIB. - reqCtx := vppChan.SendMultiRequest(&l3ba.IPFibDump{}) + reqCtx := handler.callsChannel.SendMultiRequest(&l3ba.IPFibDump{}) for { fibDetails := &l3ba.IPFibDetails{} stop, err := reqCtx.ReceiveReply(fibDetails) @@ -50,14 +43,13 @@ func DumpStaticRoutes(log logging.Logger, vppChan govppapi.Channel, timeLog meas break // Break from the loop. } if err != nil { - log.Error(err) return nil, err } if len(fibDetails.Path) > 0 && fibDetails.Path[0].IsDrop == 1 { // skip drop routes, not supported by vpp-agent continue } - ipv4Route, err := dumpStaticRouteIPv4Details(fibDetails) + ipv4Route, err := handler.dumpStaticRouteIPv4Details(fibDetails) if err != nil { return nil, err } @@ -65,7 +57,7 @@ func DumpStaticRoutes(log logging.Logger, vppChan govppapi.Channel, timeLog meas } // Dump IPv6 l3 FIB. - reqCtx = vppChan.SendMultiRequest(&l3ba.IP6FibDump{}) + reqCtx = handler.callsChannel.SendMultiRequest(&l3ba.IP6FibDump{}) for { fibDetails := &l3ba.IP6FibDetails{} stop, err := reqCtx.ReceiveReply(fibDetails) @@ -73,14 +65,13 @@ func DumpStaticRoutes(log logging.Logger, vppChan govppapi.Channel, timeLog meas break // break out of the loop } if err != nil { - log.Error(err) return nil, err } if len(fibDetails.Path) > 0 && fibDetails.Path[0].IsDrop == 1 { // skip drop routes, not supported by vpp-agent continue } - ipv6Route, err := dumpStaticRouteIPv6Details(fibDetails) + ipv6Route, err := handler.dumpStaticRouteIPv6Details(fibDetails) if err != nil { return nil, err } @@ -90,16 +81,16 @@ func DumpStaticRoutes(log logging.Logger, vppChan govppapi.Channel, timeLog meas return routes, nil } -func dumpStaticRouteIPv4Details(fibDetails *l3ba.IPFibDetails) (*vppcalls.Route, error) { - return dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, false) +func (handler *routeHandler) dumpStaticRouteIPv4Details(fibDetails *l3ba.IPFibDetails) (*Route, error) { + return handler.dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, false) } -func dumpStaticRouteIPv6Details(fibDetails *l3ba.IP6FibDetails) (*vppcalls.Route, error) { - return dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, true) +func (handler *routeHandler) dumpStaticRouteIPv6Details(fibDetails *l3ba.IP6FibDetails) (*Route, error) { + return handler.dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, true) } // dumpStaticRouteIPDetails processes static route details and returns a route object -func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, prefixLen uint8, path []l3ba.FibPath, ipv6 bool) (*vppcalls.Route, error) { +func (handler *routeHandler) dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, prefixLen uint8, path []l3ba.FibPath, ipv6 bool) (*Route, error) { // 
route details
 	var ipAddr string
 	if ipv6 {
@@ -108,8 +99,8 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte,
 		ipAddr = fmt.Sprintf("%s/%d", net.IP(address[:4]).To4().String(), uint32(prefixLen))
 	}
 
-	rt := &vppcalls.Route{
-		Type: vppcalls.IntraVrf, // default
+	rt := &Route{
+		Type: IntraVrf, // default
 	}
 
 	// IP net
@@ -134,9 +125,9 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte,
 
 	rt.NextHopAddr = nextHopAddr
 
-	if path[0].SwIfIndex == vppcalls.NextHopOutgoingIfUnset && path[0].TableID != tableID {
+	if path[0].SwIfIndex == NextHopOutgoingIfUnset && path[0].TableID != tableID {
 		// outgoing interface not specified and path table id not equal to route table id = inter-VRF route
-		rt.Type = vppcalls.InterVrf
+		rt.Type = InterVrf
 		rt.ViaVrfId = path[0].TableID
 	}
 
@@ -148,20 +139,16 @@ func dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte,
 	return rt, nil
 }
 
-// DumpArps dumps ARPs from VPP and fills them into the provided static route map.
-func DumpArps(log logging.Logger, vppChan govppapi.Channel, timeLog measure.StopWatchEntry) ([]*vppcalls.ArpEntry, error) {
-	// IPFibDump time measurement
-	start := time.Now()
-	defer func() {
-		if timeLog != nil {
-			timeLog.LogTimeEntry(time.Since(start))
-		}
-	}()
+func (handler *arpVppHandler) DumpArpEntries() ([]*ArpEntry, error) {
+	// ArpDump time measurement
+	defer func(t time.Time) {
+		handler.stopwatch.TimeLog(l3ba.IPNeighborDump{}).LogTimeEntry(time.Since(t))
+	}(time.Now())
 
-	var arps []*vppcalls.ArpEntry
+	var arps []*ArpEntry
 
 	// Dump ARPs.
-	reqCtx := vppChan.SendMultiRequest(&l3ba.IPNeighborDump{
+	reqCtx := handler.callsChannel.SendMultiRequest(&l3ba.IPNeighborDump{
 		SwIfIndex: 0xffffffff,
 	})
 	for {
@@ -171,12 +158,12 @@ func DumpArps(log logging.Logger, vppChan govppapi.Channel, timeLog measure.Stop
 			break
 		}
 		if err != nil {
-			log.Error(err)
+			handler.log.Error(err)
 			return nil, err
 		}
 
 		var mac net.HardwareAddr = arpDetails.MacAddress
 
-		arp := &vppcalls.ArpEntry{
+		arp := &ArpEntry{
 			Interface:  arpDetails.SwIfIndex,
 			MacAddress: mac.String(),
 			Static:     uintToBool(arpDetails.IsStatic),
diff --git a/plugins/vpp/l3plugin/vppdump/dump_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls_test.go
similarity index 89%
rename from plugins/vpp/l3plugin/vppdump/dump_vppcalls_test.go
rename to plugins/vpp/l3plugin/vppcalls/dump_vppcalls_test.go
index 584ad743cd..c8df8a69e3 100644
--- a/plugins/vpp/l3plugin/vppdump/dump_vppcalls_test.go
+++ b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls_test.go
@@ -12,21 +12,23 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package vppdump_test
+package vppcalls
 
 import (
+	"testing"
+
 	"github.com/ligato/cn-infra/logging/logrus"
 	"github.com/ligato/vpp-agent/plugins/vpp/binapi/ip"
 	"github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe"
-	"github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppdump"
 	"github.com/ligato/vpp-agent/tests/vppcallmock"
 	.
"github.com/onsi/gomega" - "testing" ) // Test dumping routes func TestDumpStaticRoutes(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) + l3handler, err := NewRouteVppHandler(ctx.MockChannel, logrus.DefaultLogger(), nil) + Expect(err).To(BeNil()) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPFibDetails{ @@ -38,7 +40,7 @@ func TestDumpStaticRoutes(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - routes, err := vppdump.DumpStaticRoutes(logrus.DefaultLogger(), ctx.MockChannel, nil) + routes, err := l3handler.DumpStaticRoutes() Expect(err).To(Succeed()) Expect(routes).To(HaveLen(2)) Expect(routes[0].OutIface).To(Equal(uint32(3))) diff --git a/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls.go index 7a666e5b0e..baec911ab2 100644 --- a/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls.go @@ -19,8 +19,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" ) @@ -32,30 +30,26 @@ var ProxyArpMessages = []govppapi.Message{ &ip.ProxyArpAddDelReply{}, } -// EnableProxyArpInterface enables interface for proxy ARP -func EnableProxyArpInterface(swIfIdx uint32, vppChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) error { - return vppAddDelProxyArpInterface(swIfIdx, vppChan, true, log, stopwatch) +func (handler *proxyArpVppHandler) EnableProxyArpInterface(swIfIdx uint32) error { + return handler.vppAddDelProxyArpInterface(swIfIdx, true) } -// DisableProxyArpInterface disables interface for proxy ARP -func DisableProxyArpInterface(swIfIdx uint32, vppChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) error { - return vppAddDelProxyArpInterface(swIfIdx, vppChan, false, log, stopwatch) +func (handler *proxyArpVppHandler) DisableProxyArpInterface(swIfIdx uint32) error { + return handler.vppAddDelProxyArpInterface(swIfIdx, false) } -// AddProxyArpRange adds new IP range for proxy ARP -func AddProxyArpRange(firstIP, lastIP []byte, vppChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) error { - return vppAddDelProxyArpRange(firstIP, lastIP, vppChan, true, log, stopwatch) +func (handler *proxyArpVppHandler) AddProxyArpRange(firstIP, lastIP []byte) error { + return handler.vppAddDelProxyArpRange(firstIP, lastIP, true) } -// DeleteProxyArpRange removes proxy ARP IP range -func DeleteProxyArpRange(firstIP, lastIP []byte, vppChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) error { - return vppAddDelProxyArpRange(firstIP, lastIP, vppChan, false, log, stopwatch) +func (handler *proxyArpVppHandler) DeleteProxyArpRange(firstIP, lastIP []byte) error { + return handler.vppAddDelProxyArpRange(firstIP, lastIP, false) } // vppAddDelProxyArpInterface adds or removes proxy ARP interface entry according to provided input -func vppAddDelProxyArpInterface(swIfIdx uint32, vppChan govppapi.Channel, enable bool, log logging.Logger, stopwatch *measure.Stopwatch) error { +func (handler *proxyArpVppHandler) vppAddDelProxyArpInterface(swIfIdx uint32, enable bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ip.ProxyArpIntfcEnableDisable{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ip.ProxyArpIntfcEnableDisable{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ip.ProxyArpIntfcEnableDisable{} @@ -68,22 +62,22 @@ func vppAddDelProxyArpInterface(swIfIdx 
uint32, vppChan govppapi.Channel, enable // Send message reply := &ip.ProxyArpIntfcEnableDisableReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - log.Debugf("interface %v enabled for proxy arp: %v", req.SwIfIndex, enable) + handler.log.Debugf("interface %v enabled for proxy arp: %v", req.SwIfIndex, enable) return nil } // vppAddDelProxyArpRange adds or removes proxy ARP range according to provided input -func vppAddDelProxyArpRange(firstIP, lastIP []byte, vppChan govppapi.Channel, isAdd bool, log logging.Logger, stopwatch *measure.Stopwatch) error { +func (handler *proxyArpVppHandler) vppAddDelProxyArpRange(firstIP, lastIP []byte, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ip.ProxyArpAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ip.ProxyArpAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ip.ProxyArpAddDel{} @@ -99,14 +93,14 @@ func vppAddDelProxyArpRange(firstIP, lastIP []byte, vppChan govppapi.Channel, is // Send message reply := &ip.ProxyArpAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - log.Debugf("proxy arp range: %v - %v added: %v", req.Proxy.LowAddress, req.Proxy.HiAddress, isAdd) + handler.log.Debugf("proxy arp range: %v - %v added: %v", req.Proxy.LowAddress, req.Proxy.HiAddress, isAdd) return nil } diff --git a/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls_test.go index 6b90946534..86e7924653 100644 --- a/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls_test.go +++ b/plugins/vpp/l3plugin/vppcalls/proxyarp_vppcalls_test.go @@ -15,46 +15,57 @@ package vppcalls_test import ( + "testing" + "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" - "testing" ) // Test enable/disable proxy arp func TestProxyArp(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, arpHandler, pArpHandler := pArpTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.ProxyArpIntfcEnableDisableReply{}) - err := vppcalls.EnableProxyArpInterface(0, ctx.MockChannel, logrus.DefaultLogger(), nil) + err := pArpHandler.EnableProxyArpInterface(0) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.ProxyArpIntfcEnableDisableReply{}) - err = vppcalls.DisableProxyArpInterface(0, ctx.MockChannel, logrus.DefaultLogger(), nil) + err = pArpHandler.DisableProxyArpInterface(0) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.ProxyArpIntfcEnableDisableReply{Retval: 1}) - err = vppcalls.VppAddArp(&arpEntries[0], ctx.MockChannel, nil) + err = arpHandler.VppAddArp(&arpEntries[0]) Expect(err).To(Not(BeNil())) } // Test add/delete ip range for proxy arp func TestProxyArpRange(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, _, pArpHandler := pArpTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.ProxyArpAddDelReply{}) - err := vppcalls.AddProxyArpRange([]byte{192, 168, 10, 20}, []byte{192, 168, 10, 30}, ctx.MockChannel, logrus.DefaultLogger(), nil) + err := pArpHandler.AddProxyArpRange([]byte{192, 168, 10, 20}, []byte{192, 168, 10, 30}) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.ProxyArpAddDelReply{}) - err = vppcalls.DeleteProxyArpRange([]byte{192, 168, 10, 23}, []byte{192, 168, 10, 27}, ctx.MockChannel, logrus.DefaultLogger(), nil) + err = pArpHandler.DeleteProxyArpRange([]byte{192, 168, 10, 23}, []byte{192, 168, 10, 27}) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.ProxyArpAddDelReply{Retval: 1}) - err = vppcalls.AddProxyArpRange([]byte{192, 168, 10, 23}, []byte{192, 168, 10, 27}, ctx.MockChannel, logrus.DefaultLogger(), nil) + err = pArpHandler.AddProxyArpRange([]byte{192, 168, 10, 23}, []byte{192, 168, 10, 27}) Expect(err).To(Not(BeNil())) } + +func pArpTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.ArpVppAPI, vppcalls.ProxyArpVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + arpHandler, err := vppcalls.NewArpVppHandler(ctx.MockChannel, log, nil) + Expect(err).To(BeNil()) + pArpHandler, err := vppcalls.NewProxyArpVppHandler(ctx.MockChannel, log, nil) + Expect(err).To(BeNil()) + return ctx, arpHandler, pArpHandler +} diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go index d6148229f6..1638097c65 100644 --- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls.go @@ -20,8 +20,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" @@ -74,9 +72,9 @@ const ( ) // vppAddDelRoute adds or removes route, according to provided input. Every route has to contain VRF ID (default is 0). 
-func vppAddDelRoute(route *Route, vppChan govppapi.Channel, delete bool, stopwatch *measure.Stopwatch) error { +func (handler *routeHandler) vppAddDelRoute(route *Route, delete bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ip.IPAddDelRoute{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ip.IPAddDelRoute{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ip.IPAddDelRoute{} @@ -124,7 +122,7 @@ func vppAddDelRoute(route *Route, vppChan govppapi.Channel, delete bool, stopwat // Send message reply := &ip.IPAddDelRouteReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -134,12 +132,7 @@ func vppAddDelRoute(route *Route, vppChan govppapi.Channel, delete bool, stopwat return nil } -// VppAddRoute adds new route, according to provided input. Every route has to contain VRF ID (default is 0). -func VppAddRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - ifHandler, err := ifvppcalls.NewIfVppHandler(vppChan, logrus.DefaultLogger(), stopwatch) // TODO temp change - if err != nil { - return err - } +func (handler *routeHandler) VppAddRoute(ifHandler ifvppcalls.IfVppWrite, route *Route) error { if err := ifHandler.CreateVrfIfNeeded(route.VrfID); err != nil { return err } @@ -148,10 +141,9 @@ func VppAddRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stop return err } } - return vppAddDelRoute(route, vppChan, false, stopwatch) + return handler.vppAddDelRoute(route, false) } -// VppDelRoute removes old route, according to provided input. Every route has to contain VRF ID (default is 0). -func VppDelRoute(route *Route, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return vppAddDelRoute(route, vppChan, true, stopwatch) +func (handler *routeHandler) VppDelRoute(route *Route) error { + return handler.vppAddDelRoute(route, true) } diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go index 7d5fc4ee7a..a3fbda30ab 100644 --- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go +++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go @@ -15,13 +15,16 @@ package vppcalls_test import ( + "net" + "testing" + + "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" + ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" - "net" - "testing" ) var routes = []vppcalls.Route{ @@ -39,35 +42,45 @@ var routes = []vppcalls.Route{ // Test adding routes func TestAddRoute(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, ifHandler, rtHandler := routeTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPFibDetails{}) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) ctx.MockVpp.MockReply(&ip.IPTableAddDelReply{}) ctx.MockVpp.MockReply(&ip.IPAddDelRouteReply{}) - err := vppcalls.VppAddRoute(&routes[0], ctx.MockChannel, nil) + err := rtHandler.VppAddRoute(ifHandler, &routes[0]) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.IPAddDelRouteReply{}) - err = vppcalls.VppAddRoute(&routes[0], ctx.MockChannel, nil) + err = rtHandler.VppAddRoute(ifHandler, &routes[0]) Expect(err).To(Not(BeNil())) } // Test deleteing routes func TestDeleteRoute(t *testing.T) { - ctx := vppcallmock.SetupTestCtx(t) + ctx, _, rtHandler := routeTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&ip.IPAddDelRouteReply{}) - err := vppcalls.VppDelRoute(&routes[0], ctx.MockChannel, nil) + err := rtHandler.VppDelRoute(&routes[0]) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.IPAddDelRouteReply{}) - err = vppcalls.VppDelRoute(&routes[1], ctx.MockChannel, nil) + err = rtHandler.VppDelRoute(&routes[1]) Expect(err).To(Succeed()) ctx.MockVpp.MockReply(&ip.IPAddDelRouteReply{1}) - err = vppcalls.VppDelRoute(&routes[0], ctx.MockChannel, nil) + err = rtHandler.VppDelRoute(&routes[0]) Expect(err).To(Not(BeNil())) } + +func routeTestSetup(t *testing.T) (*vppcallmock.TestCtx, ifvppcalls.IfVppAPI, vppcalls.RouteVppAPI) { + ctx := vppcallmock.SetupTestCtx(t) + log := logrus.NewLogger("test-log") + ifHandler, err := ifvppcalls.NewIfVppHandler(ctx.MockChannel, log, nil) + Expect(err).To(BeNil()) + rtHandler, err := vppcalls.NewRouteVppHandler(ctx.MockChannel, log, nil) + Expect(err).To(BeNil()) + return ctx, ifHandler, rtHandler +} diff --git a/plugins/vpp/l3plugin/vppdump/doc.go b/plugins/vpp/l3plugin/vppdump/doc.go deleted file mode 100644 index f390e50fe1..0000000000 --- a/plugins/vpp/l3plugin/vppdump/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package vppdump provides helpers for dumping all L3 FIBs configured in VPP. -package vppdump From 52c9bb1d4fe8b80cd1e94107b556bc2c38bba62d Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Tue, 24 Jul 2018 11:10:57 +0200 Subject: [PATCH 037/174] fix build Signed-off-by: Vladimir Lavor --- .../vpp/l3plugin/vppcalls/dump_vppcalls.go | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go index b52f787d69..f8387b1212 100644 --- a/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go @@ -21,23 +21,22 @@ import ( "time" - "git.fd.io/govpp.git/examples/bin_api/ip" "github.com/ligato/cn-infra/utils/addrs" - l3ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" + l3binapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" ) func (handler *routeHandler) DumpStaticRoutes() ([]*Route, error) { // IPFibDump time measurement defer func(t time.Time) { - handler.stopwatch.TimeLog(ip.IPFibDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l3binapi.IPFibDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) var routes []*Route // Dump IPv4 l3 FIB. 
- reqCtx := handler.callsChannel.SendMultiRequest(&l3ba.IPFibDump{}) + reqCtx := handler.callsChannel.SendMultiRequest(&l3binapi.IPFibDump{}) for { - fibDetails := &l3ba.IPFibDetails{} + fibDetails := &l3binapi.IPFibDetails{} stop, err := reqCtx.ReceiveReply(fibDetails) if stop { break // Break from the loop. @@ -57,9 +56,9 @@ func (handler *routeHandler) DumpStaticRoutes() ([]*Route, error) { } // Dump IPv6 l3 FIB. - reqCtx = handler.callsChannel.SendMultiRequest(&l3ba.IP6FibDump{}) + reqCtx = handler.callsChannel.SendMultiRequest(&l3binapi.IP6FibDump{}) for { - fibDetails := &l3ba.IP6FibDetails{} + fibDetails := &l3binapi.IP6FibDetails{} stop, err := reqCtx.ReceiveReply(fibDetails) if stop { break // break out of the loop @@ -81,16 +80,16 @@ func (handler *routeHandler) DumpStaticRoutes() ([]*Route, error) { return routes, nil } -func (handler *routeHandler) dumpStaticRouteIPv4Details(fibDetails *l3ba.IPFibDetails) (*Route, error) { +func (handler *routeHandler) dumpStaticRouteIPv4Details(fibDetails *l3binapi.IPFibDetails) (*Route, error) { return handler.dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, false) } -func (handler *routeHandler) dumpStaticRouteIPv6Details(fibDetails *l3ba.IP6FibDetails) (*Route, error) { +func (handler *routeHandler) dumpStaticRouteIPv6Details(fibDetails *l3binapi.IP6FibDetails) (*Route, error) { return handler.dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, true) } // dumpStaticRouteIPDetails processes static route details and returns a route object -func (handler *routeHandler) dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, prefixLen uint8, path []l3ba.FibPath, ipv6 bool) (*Route, error) { +func (handler *routeHandler) dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, prefixLen uint8, path []l3binapi.FibPath, ipv6 bool) (*Route, error) { // route details var ipAddr string if ipv6 { @@ -142,17 +141,17 @@ func (handler *routeHandler) dumpStaticRouteIPDetails(tableID uint32, tableName func (handler *arpVppHandler) DumpArpEntries() ([]*ArpEntry, error) { // ArpDump time measurement defer func(t time.Time) { - handler.stopwatch.TimeLog(ip.IPFibDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(l3binapi.IPFibDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) var arps []*ArpEntry // Dump ARPs. 
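// Illustrative aside (not part of the patch): like the FIB dump above, the ARP dump
// below follows govpp's multi-request pattern - SendMultiRequest issues the request
// and ReceiveReply is called in a loop until stop marks the terminating control-ping.
// A hypothetical consumer reads the whole table through the handler in one call:
//
//	arpHandler, err := vppcalls.NewArpVppHandler(ch, log, nil) // constructor as used in pArpTestSetup
//	entries, err := arpHandler.DumpArpEntries()                // []*ArpEntry with Interface, MacAddress, Static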
- reqCtx := handler.callsChannel.SendMultiRequest(&l3ba.IPNeighborDump{ + reqCtx := handler.callsChannel.SendMultiRequest(&l3binapi.IPNeighborDump{ SwIfIndex: 0xffffffff, }) for { - arpDetails := &l3ba.IPNeighborDetails{} + arpDetails := &l3binapi.IPNeighborDetails{} stop, err := reqCtx.ReceiveReply(arpDetails) if stop { break From ea8e83b4c762a466bf1dbde1638a7cb34fe8e229 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 9 Jul 2018 15:21:44 +0200 Subject: [PATCH 038/174] ipsec vppcalls api Signed-off-by: Vladimir Lavor --- plugins/vpp/ifplugin/vppcalls/api_vppcalls.go | 2 +- plugins/vpp/ipsecplugin/ipsec_config.go | 38 +++++---- .../vpp/ipsecplugin/vppcalls/api_vppcalls.go | 78 ++++++++++++++++++ .../ipsecplugin/vppcalls/ipsec_vppcalls.go | 81 ++++++++----------- 4 files changed, 135 insertions(+), 64 deletions(-) create mode 100644 plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go index 8b73ce656a..98f156b8be 100644 --- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -29,7 +29,7 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/model/nat" ) -// IfVppAPI provides methods for creating and managing BFD +// IfVppAPI provides methods for creating and managing interface plugin type IfVppAPI interface { IfVppWrite IfVppRead diff --git a/plugins/vpp/ipsecplugin/ipsec_config.go b/plugins/vpp/ipsecplugin/ipsec_config.go index d3eb03566d..a808ce3d53 100644 --- a/plugins/vpp/ipsecplugin/ipsec_config.go +++ b/plugins/vpp/ipsecplugin/ipsec_config.go @@ -62,7 +62,8 @@ type IPSecConfigurator struct { vppCh govppapi.Channel // VPP API handlers - ifHandler iface_vppcalls.IfVppAPI + ifHandler iface_vppcalls.IfVppAPI + ipSecHandler vppcalls.IPsecVppAPI // Timer used to measure and store time stopwatch *measure.Stopwatch @@ -75,6 +76,11 @@ func (plugin *IPSecConfigurator) Init(logger logging.PluginLogger, goVppMux govp plugin.log = logger.NewLogger("-ipsec-plugin") plugin.log.Debug("Initializing IPSec configurator") + // Configurator-wide stopwatch instance + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("IPSec-configurator", plugin.log) + } + // Mappings plugin.ifIndexes = swIfIndexes plugin.spdIndexes = ipsecidx.NewSPDIndex(nametoidx.NewNameToIdx(plugin.log, "ipsec_spd_indexes", nil)) @@ -89,15 +95,13 @@ func (plugin *IPSecConfigurator) Init(logger logging.PluginLogger, goVppMux govp return err } - // Stopwatch - if enableStopwatch { - plugin.stopwatch = measure.NewStopwatch("IPSecConfigurator", plugin.log) - } - // VPP API handlers if plugin.ifHandler, err = iface_vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch); err != nil { return err } + if plugin.ipSecHandler, err = vppcalls.NewIPsecVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch); err != nil { + return err + } // Message compatibility if err = plugin.vppCh.CheckMessageCompatibility(vppcalls.IPSecMessages...); err != nil { @@ -153,7 +157,7 @@ func (plugin *IPSecConfigurator) ConfigureSPD(spd *ipsec.SecurityPolicyDatabases func (plugin *IPSecConfigurator) configureSPD(spdID uint32, spd *ipsec.SecurityPolicyDatabases_SPD) error { plugin.log.Debugf("configuring SPD %v (%d)", spd.Name, spdID) - if err := vppcalls.AddSPD(spdID, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.AddSPD(spdID); err != nil { return err } @@ -170,7 +174,7 @@ func (plugin *IPSecConfigurator) configureSPD(spdID uint32, spd *ipsec.SecurityP 
continue } - if err := vppcalls.InterfaceAddSPD(spdID, swIfIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.InterfaceAddSPD(spdID, swIfIdx); err != nil { plugin.log.Errorf("assigning interface to SPD failed: %v", err) continue } @@ -190,7 +194,7 @@ func (plugin *IPSecConfigurator) configureSPD(spdID uint32, spd *ipsec.SecurityP } } - if err := vppcalls.AddSPDEntry(spdID, saID, entry, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.AddSPDEntry(spdID, saID, entry); err != nil { plugin.log.Errorf("adding SPD policy entry failed: %v", err) continue } @@ -234,7 +238,7 @@ func (plugin *IPSecConfigurator) DeleteSPD(oldSpd *ipsec.SecurityPolicyDatabases plugin.log.Warnf("SPD %q not found", oldSpd.Name) return nil } - if err := vppcalls.DelSPD(spdID, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.DelSPD(spdID); err != nil { return err } @@ -259,7 +263,7 @@ func (plugin *IPSecConfigurator) ConfigureSA(sa *ipsec.SecurityAssociations_SA) saID := plugin.saIndexSeq plugin.saIndexSeq++ - if err := vppcalls.AddSAEntry(saID, sa, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.AddSAEntry(saID, sa); err != nil { return err } @@ -322,7 +326,7 @@ func (plugin *IPSecConfigurator) DeleteSA(oldSa *ipsec.SecurityAssociations_SA) plugin.log.Warnf("caching SPD %v due removed SA %v", entry.SPD.Name, oldSa.Name) } - if err := vppcalls.DelSAEntry(saID, oldSa, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.DelSAEntry(saID, oldSa); err != nil { return err } @@ -336,7 +340,7 @@ func (plugin *IPSecConfigurator) DeleteSA(oldSa *ipsec.SecurityAssociations_SA) func (plugin *IPSecConfigurator) ConfigureTunnel(tunnel *ipsec.TunnelInterfaces_Tunnel) error { plugin.log.Debugf("Configuring Tunnel %v", tunnel.Name) - ifIdx, err := vppcalls.AddTunnelInterface(tunnel, plugin.vppCh, plugin.stopwatch) + ifIdx, err := plugin.ipSecHandler.AddTunnelInterface(tunnel) if err != nil { return err } @@ -395,7 +399,7 @@ func (plugin *IPSecConfigurator) DeleteTunnel(oldTunnel *ipsec.TunnelInterfaces_ return nil } - if err := vppcalls.DelTunnelInterface(ifIdx, oldTunnel, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.DelTunnelInterface(ifIdx, oldTunnel); err != nil { return err } @@ -412,13 +416,13 @@ func (plugin *IPSecConfigurator) ResolveCreatedInterface(ifName string, swIfIdx plugin.log.Infof("Assigning SPD %v to interface %q", entry.spdID, ifName) // TODO: loop through stored deletes, this is now needed because old assignment might still exist - if err := vppcalls.InterfaceDelSPD(entry.spdID, swIfIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.InterfaceDelSPD(entry.spdID, swIfIdx); err != nil { plugin.log.Errorf("unassigning interface from SPD failed: %v", err) } else { plugin.log.Infof("Unassigned SPD %v from interface %q", entry.spdID, ifName) } - if err := vppcalls.InterfaceAddSPD(entry.spdID, swIfIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.InterfaceAddSPD(entry.spdID, swIfIdx); err != nil { plugin.log.Errorf("assigning interface to SPD failed: %v", err) continue } else { @@ -436,7 +440,7 @@ func (plugin *IPSecConfigurator) ResolveDeletedInterface(ifName string, swIfIdx plugin.log.Infof("Unassigning SPD %v from interface %q", assign.SpdID, ifName) // TODO: just store this for future, because this will fail since swIfIdx no longer exists - if err := 
vppcalls.InterfaceDelSPD(assign.SpdID, swIfIdx, plugin.vppCh, plugin.stopwatch); err != nil { + if err := plugin.ipSecHandler.InterfaceDelSPD(assign.SpdID, swIfIdx); err != nil { plugin.log.Errorf("unassigning interface from SPD failed: %v", err) } else { plugin.log.Infof("Unassigned SPD %v from interface %q", assign.SpdID, ifName) diff --git a/plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go new file mode 100644 index 0000000000..0b8d75a374 --- /dev/null +++ b/plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go @@ -0,0 +1,78 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vppcalls + +import ( + govppapi "git.fd.io/govpp.git/api" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/logging/measure" + "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" +) + +// IPsecVppAPI provides methods for creating and managing IPsec configuration +type IPsecVppAPI interface { + IPsecVppWrite + IPsecVPPRead +} + +// IPsecVppWrite provides write methods for IPsec +type IPsecVppWrite interface { + // AddTunnelInterface adds tunnel interface + AddTunnelInterface(tunnel *ipsec.TunnelInterfaces_Tunnel) (uint32, error) + // DelTunnelInterface removes tunnel interface + DelTunnelInterface(ifIdx uint32, tunnel *ipsec.TunnelInterfaces_Tunnel) error + // AddSPD adds SPD to VPP via binary API + AddSPD(spdID uint32) error + // DelSPD deletes SPD from VPP via binary API + DelSPD(spdID uint32) error + // InterfaceAddSPD adds SPD interface assignment to VPP via binary API + InterfaceAddSPD(spdID, swIfIdx uint32) error + // InterfaceDelSPD deletes SPD interface assignment from VPP via binary API + InterfaceDelSPD(spdID, swIfIdx uint32) error + // AddSPDEntry adds SPD policy entry to VPP via binary API + AddSPDEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry) error + // DelSPDEntry deletes SPD policy entry from VPP via binary API + DelSPDEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry) error + // AddSAEntry adds SA to VPP via binary API + AddSAEntry(saID uint32, sa *ipsec.SecurityAssociations_SA) error + // DelSAEntry deletes SA from VPP via binary API + DelSAEntry(saID uint32, sa *ipsec.SecurityAssociations_SA) error +} + +// IPsecVPPRead provides read methods for IPsec +type IPsecVPPRead interface { + // TODO define dump methods +} + +// ipSecVppHandler is accessor for IPsec-related vppcalls methods +type ipSecVppHandler struct { + stopwatch *measure.Stopwatch + callsChannel govppapi.Channel + log logging.Logger +} + +// NewIPsecVppHandler creates new instance of IPsec vppcalls handler +func NewIPsecVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*ipSecVppHandler, error) { + handler := &ipSecVppHandler{ + callsChannel: callsChan, + stopwatch: stopwatch, + log: log, + } + if err := handler.callsChannel.CheckMessageCompatibility(IPSecMessages...); err != nil { + return nil, err
+ } + + return handler, nil +} diff --git a/plugins/vpp/ipsecplugin/vppcalls/ipsec_vppcalls.go b/plugins/vpp/ipsecplugin/vppcalls/ipsec_vppcalls.go index e9470a6cc8..6f6bb48619 100644 --- a/plugins/vpp/ipsecplugin/vppcalls/ipsec_vppcalls.go +++ b/plugins/vpp/ipsecplugin/vppcalls/ipsec_vppcalls.go @@ -21,7 +21,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/addrs" ipsec_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/ipsec" "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" @@ -49,9 +48,9 @@ var IPSecMessages = []govppapi.Message{ &ipsec_api.IpsecTunnelIfSetSaReply{}, } -func tunnelIfAddDel(tunnel *ipsec.TunnelInterfaces_Tunnel, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (uint32, error) { +func (handler *ipSecVppHandler) tunnelIfAddDel(tunnel *ipsec.TunnelInterfaces_Tunnel, isAdd bool) (uint32, error) { defer func(t time.Time) { - stopwatch.TimeLog(ipsec_api.IpsecTunnelIfAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ipsec_api.IpsecTunnelIfAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) localCryptoKey, err := hex.DecodeString(tunnel.LocalCryptoKey) @@ -92,7 +91,7 @@ func tunnelIfAddDel(tunnel *ipsec.TunnelInterfaces_Tunnel, isAdd bool, vppChan g } reply := &ipsec_api.IpsecTunnelIfAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, err } if reply.Retval != 0 { @@ -102,21 +101,19 @@ func tunnelIfAddDel(tunnel *ipsec.TunnelInterfaces_Tunnel, isAdd bool, vppChan g return reply.SwIfIndex, nil } -// AddSPD adds SPD to VPP via binary API -func AddTunnelInterface(tunnel *ipsec.TunnelInterfaces_Tunnel, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (uint32, error) { - return tunnelIfAddDel(tunnel, true, vppChan, stopwatch) +func (handler *ipSecVppHandler) AddTunnelInterface(tunnel *ipsec.TunnelInterfaces_Tunnel) (uint32, error) { + return handler.tunnelIfAddDel(tunnel, true) } -// DelSPD deletes SPD from VPP via binary API -func DelTunnelInterface(ifIdx uint32, tunnel *ipsec.TunnelInterfaces_Tunnel, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ipSecVppHandler) DelTunnelInterface(ifIdx uint32, tunnel *ipsec.TunnelInterfaces_Tunnel) error { // Note: ifIdx is not used now, tunnel should be matched based on parameters - _, err := tunnelIfAddDel(tunnel, false, vppChan, stopwatch) + _, err := handler.tunnelIfAddDel(tunnel, false) return err } -func spdAddDel(spdID uint32, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ipSecVppHandler) spdAddDel(spdID uint32, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ipsec_api.IpsecSpdAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ipsec_api.IpsecSpdAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ipsec_api.IpsecSpdAddDel{ @@ -125,7 +122,7 @@ func spdAddDel(spdID uint32, isAdd bool, vppChan govppapi.Channel, stopwatch *me } reply := &ipsec_api.IpsecSpdAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -135,19 +132,17 @@ func spdAddDel(spdID uint32, isAdd bool, vppChan govppapi.Channel, stopwatch *me return nil } -// AddSPD adds SPD to VPP via binary API -func AddSPD(spdID uint32, vppChan govppapi.Channel,
stopwatch *measure.Stopwatch) error { - return spdAddDel(spdID, true, vppChan, stopwatch) +func (handler *ipSecVppHandler) AddSPD(spdID uint32) error { + return handler.spdAddDel(spdID, true) } -// DelSPD deletes SPD from VPP via binary API -func DelSPD(spdID uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return spdAddDel(spdID, false, vppChan, stopwatch) +func (handler *ipSecVppHandler) DelSPD(spdID uint32) error { + return handler.spdAddDel(spdID, false) } -func interfaceAddDelSpd(spdID, swIfIdx uint32, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ipSecVppHandler) interfaceAddDelSpd(spdID, swIfIdx uint32, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ipsec_api.IpsecInterfaceAddDelSpd{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ipsec_api.IpsecInterfaceAddDelSpd{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ipsec_api.IpsecInterfaceAddDelSpd{ @@ -157,7 +152,7 @@ func interfaceAddDelSpd(spdID, swIfIdx uint32, isAdd bool, vppChan govppapi.Chan } reply := &ipsec_api.IpsecInterfaceAddDelSpdReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -167,19 +162,17 @@ func interfaceAddDelSpd(spdID, swIfIdx uint32, isAdd bool, vppChan govppapi.Chan return nil } -// InterfaceAddSPD adds SPD interface assignment to VPP via binary API -func InterfaceAddSPD(spdID, swIfIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return interfaceAddDelSpd(spdID, swIfIdx, true, vppChan, stopwatch) +func (handler *ipSecVppHandler) InterfaceAddSPD(spdID, swIfIdx uint32) error { + return handler.interfaceAddDelSpd(spdID, swIfIdx, true) } -// InterfaceDelSPD deletes SPD interface assignment from VPP via binary API -func InterfaceDelSPD(spdID, swIfIdx uint32, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return interfaceAddDelSpd(spdID, swIfIdx, false, vppChan, stopwatch) +func (handler *ipSecVppHandler) InterfaceDelSPD(spdID, swIfIdx uint32) error { + return handler.interfaceAddDelSpd(spdID, swIfIdx, false) } -func spdAddDelEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ipSecVppHandler) spdAddDelEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ipsec_api.IpsecSpdAddDelEntry{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ipsec_api.IpsecSpdAddDelEntry{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &ipsec_api.IpsecSpdAddDelEntry{ @@ -227,7 +220,7 @@ func spdAddDelEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_P } reply := &ipsec_api.IpsecSpdAddDelEntryReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -237,19 +230,17 @@ func spdAddDelEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_P return nil } -// AddSPDEntry adds SPD policy entry to VPP via binary API -func AddSPDEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return spdAddDelEntry(spdID, saID, spd, true, vppChan, stopwatch) +func (handler *ipSecVppHandler) 
AddSPDEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry) error { + return handler.spdAddDelEntry(spdID, saID, spd, true) } -// DelSPDEntry deletes SPD policy entry from VPP via binary API -func DelSPDEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return spdAddDelEntry(spdID, saID, spd, false, vppChan, stopwatch) +func (handler *ipSecVppHandler) DelSPDEntry(spdID, saID uint32, spd *ipsec.SecurityPolicyDatabases_SPD_PolicyEntry) error { + return handler.spdAddDelEntry(spdID, saID, spd, false) } -func sadAddDelEntry(saID uint32, sa *ipsec.SecurityAssociations_SA, isAdd bool, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { +func (handler *ipSecVppHandler) sadAddDelEntry(saID uint32, sa *ipsec.SecurityAssociations_SA, isAdd bool) error { defer func(t time.Time) { - stopwatch.TimeLog(ipsec_api.IpsecSadAddDelEntry{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(ipsec_api.IpsecSadAddDelEntry{}).LogTimeEntry(time.Since(t)) }(time.Now()) cryptoKey, err := hex.DecodeString(sa.CryptoKey) @@ -294,7 +285,7 @@ func sadAddDelEntry(saID uint32, sa *ipsec.SecurityAssociations_SA, isAdd bool, } reply := &ipsec_api.IpsecSadAddDelEntryReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -304,14 +295,12 @@ func sadAddDelEntry(saID uint32, sa *ipsec.SecurityAssociations_SA, isAdd bool, return nil } -// AddSAEntry adds SA to VPP via binary API -func AddSAEntry(saID uint32, sa *ipsec.SecurityAssociations_SA, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return sadAddDelEntry(saID, sa, true, vppChan, stopwatch) +func (handler *ipSecVppHandler) AddSAEntry(saID uint32, sa *ipsec.SecurityAssociations_SA) error { + return handler.sadAddDelEntry(saID, sa, true) } -// DelSAEntry deletes SA from VPP via binary API -func DelSAEntry(saID uint32, sa *ipsec.SecurityAssociations_SA, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) error { - return sadAddDelEntry(saID, sa, false, vppChan, stopwatch) +func (handler *ipSecVppHandler) DelSAEntry(saID uint32, sa *ipsec.SecurityAssociations_SA) error { + return handler.sadAddDelEntry(saID, sa, false) } func boolToUint(value bool) uint8 { From e5bb2bd53d0d47a4707c5199e96b8c1f62d15bc9 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Tue, 10 Jul 2018 09:30:27 +0200 Subject: [PATCH 039/174] api for l4plugin vppcalls Signed-off-by: Vladimir Lavor --- plugins/vpp/l4plugin/appns_config.go | 16 +++-- plugins/vpp/l4plugin/vppcalls/api_vppcalls.go | 60 +++++++++++++++++++ .../vppcalls/app_namespace_vppcalls.go | 8 +-- .../l4plugin/vppcalls/features_vppcalls.go | 11 ++-- 4 files changed, 79 insertions(+), 16 deletions(-) create mode 100644 plugins/vpp/l4plugin/vppcalls/api_vppcalls.go diff --git a/plugins/vpp/l4plugin/appns_config.go b/plugins/vpp/l4plugin/appns_config.go index 2df5453d99..151d1c1b64 100644 --- a/plugins/vpp/l4plugin/appns_config.go +++ b/plugins/vpp/l4plugin/appns_config.go @@ -47,7 +47,10 @@ type AppNsConfigurator struct { appNsIdxSeq uint32 // VPP channel - vppChan govppapi.Channel + vppChan govppapi.Channel + // VPP API handler + l4Handler vppcalls.L4VppAPI + stopwatch *measure.Stopwatch // Feature flag - internal state whether the L4 features are enabled or disabled @@ -77,6 +80,11 @@ func (plugin *AppNsConfigurator) Init(logger 
logging.PluginLogger, goVppMux govp return err } + // VPP API handler + if plugin.l4Handler, err = vppcalls.NewL4VppHandler(plugin.vppChan, plugin.stopwatch); err != nil { + return err + } + // Message compatibility if err = plugin.vppChan.CheckMessageCompatibility(vppcalls.AppNsMessages...); err != nil { plugin.log.Error(err) @@ -122,7 +130,7 @@ func (plugin *AppNsConfigurator) ConfigureL4FeatureFlag(features *l4.L4Features) func (plugin *AppNsConfigurator) configureL4FeatureFlag() error { plugin.log.Info("Configuring L4 features") - if err := vppcalls.EnableL4Features(plugin.vppChan); err != nil { + if err := plugin.l4Handler.EnableL4Features(); err != nil { plugin.log.Errorf("Enabling L4 features failed: %v", err) return err } @@ -136,7 +144,7 @@ func (plugin *AppNsConfigurator) configureL4FeatureFlag() error { func (plugin *AppNsConfigurator) DeleteL4FeatureFlag() error { plugin.log.Info("Removing L4 features") - if err := vppcalls.DisableL4Features(plugin.vppChan); err != nil { + if err := plugin.l4Handler.DisableL4Features(); err != nil { plugin.log.Errorf("Disabling L4 features failed: %v", err) return err } @@ -271,7 +279,7 @@ func (plugin *AppNsConfigurator) configureAppNamespace(ns *l4.AppNamespaces_AppN plugin.log.Debugf("Adding App Namespace %v to interface %v", ns.NamespaceId, ifIdx) - appNsIdx, err := vppcalls.AddAppNamespace(ns.Secret, ifIdx, ns.Ipv4FibId, ns.Ipv6FibId, nsID, plugin.vppChan, plugin.stopwatch) + appNsIdx, err := plugin.l4Handler.AddAppNamespace(ns.Secret, ifIdx, ns.Ipv4FibId, ns.Ipv6FibId, nsID) if err != nil { return err } diff --git a/plugins/vpp/l4plugin/vppcalls/api_vppcalls.go b/plugins/vpp/l4plugin/vppcalls/api_vppcalls.go new file mode 100644 index 0000000000..8f5ab792dd --- /dev/null +++ b/plugins/vpp/l4plugin/vppcalls/api_vppcalls.go @@ -0,0 +1,60 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vppcalls + +import ( + govppapi "git.fd.io/govpp.git/api" + "github.com/ligato/cn-infra/logging/measure" +) + +// L4VppAPI provides methods for managing L4 layer configuration +type L4VppAPI interface { + L4VppWrite + L4VppRead +} + +// L4VppWrite provides write methods for L4 +type L4VppWrite interface { + // EnableL4Features sets L4 feature flag on VPP to true + EnableL4Features() error + // DisableL4Features sets L4 feature flag on VPP to false + DisableL4Features() error + // AddAppNamespace calls respective VPP binary api to configure AppNamespace + AddAppNamespace(secret uint64, swIfIdx, ip4FibID, ip6FibID uint32, id []byte) (appNsIdx uint32, err error) +} + +// L4VppRead provides read methods for L4 +type L4VppRead interface { + // Todo define dump methods +} + +// l4VppHandler is accessor for l4-related vppcalls methods +type l4VppHandler struct { + stopwatch *measure.Stopwatch + callsChannel govppapi.Channel +} + +// NewL4VppHandler creates new instance of L4 vppcalls handler +func NewL4VppHandler(callsChan govppapi.Channel, stopwatch *measure.Stopwatch) (*l4VppHandler, error) { + handler := &l4VppHandler{ + callsChannel: callsChan, + stopwatch: stopwatch, + } + if err := handler.callsChannel.CheckMessageCompatibility(AppNsMessages...); err != nil { + return nil, err + } + + return handler, nil +} diff --git a/plugins/vpp/l4plugin/vppcalls/app_namespace_vppcalls.go b/plugins/vpp/l4plugin/vppcalls/app_namespace_vppcalls.go index e962922ba8..12d88504d1 100644 --- a/plugins/vpp/l4plugin/vppcalls/app_namespace_vppcalls.go +++ b/plugins/vpp/l4plugin/vppcalls/app_namespace_vppcalls.go @@ -19,7 +19,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/binapi/session" ) @@ -31,10 +30,9 @@ var AppNsMessages = []govppapi.Message{ &session.SessionEnableDisableReply{}, } -// AddAppNamespace calls respective VPP binary api to configure AppNamespace -func AddAppNamespace(secret uint64, swIfIdx, ip4FibID, ip6FibID uint32, id []byte, vppChan govppapi.Channel, stopwatch *measure.Stopwatch) (appNsIdx uint32, err error) { +func (handler *l4VppHandler) AddAppNamespace(secret uint64, swIfIdx, ip4FibID, ip6FibID uint32, id []byte) (appNsIdx uint32, err error) { defer func(t time.Time) { - stopwatch.TimeLog(session.AppNamespaceAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(session.AppNamespaceAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &session.AppNamespaceAddDel{ @@ -47,7 +45,7 @@ func AddAppNamespace(secret uint64, swIfIdx, ip4FibID, ip6FibID uint32, id []byt } reply := &session.AppNamespaceAddDelReply{} - if err = vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err = handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return 0, err } if reply.Retval != 0 { diff --git a/plugins/vpp/l4plugin/vppcalls/features_vppcalls.go b/plugins/vpp/l4plugin/vppcalls/features_vppcalls.go index ae486f9ed6..7c6d22890c 100644 --- a/plugins/vpp/l4plugin/vppcalls/features_vppcalls.go +++ b/plugins/vpp/l4plugin/vppcalls/features_vppcalls.go @@ -17,18 +17,16 @@ package vppcalls import ( "fmt" - govppapi "git.fd.io/govpp.git/api" "github.com/ligato/vpp-agent/plugins/vpp/binapi/session" ) -// EnableL4Features sets L4 feature flag on VPP to true -func EnableL4Features(vppChan govppapi.Channel) error { +func (handler *l4VppHandler) EnableL4Features() error { req := &session.SessionEnableDisable{ IsEnable: 1, } reply := &session.SessionEnableDisableReply{} - if 
err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -38,14 +36,13 @@ func EnableL4Features(vppChan govppapi.Channel) error { return nil } -// DisableL4Features sets L4 feature flag on VPP to false -func DisableL4Features(vppChan govppapi.Channel) error { +func (handler *l4VppHandler) DisableL4Features() error { req := &session.SessionEnableDisable{ IsEnable: 0, } reply := &session.SessionEnableDisableReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { From e9d09afe3e653cd3912df6055bc0c4a5b5c54202 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 11 Jul 2018 10:39:48 +0200 Subject: [PATCH 040/174] sr plugin vppcalls api Signed-off-by: Vladimir Lavor --- plugins/vpp/plugin_impl_vpp.go | 23 +-- plugins/vpp/srplugin/data_resync.go | 12 +- plugins/vpp/srplugin/srv6_config.go | 84 +++++---- plugins/vpp/srplugin/srv6_config_test.go | 9 +- plugins/vpp/srplugin/vppcalls/api_vppcalls.go | 79 +++++++++ plugins/vpp/srplugin/vppcalls/srv6.go | 162 ++++++++---------- plugins/vpp/srplugin/vppcalls/srv6_test.go | 27 +-- tests/vppcallfake/srv6calls.go | 19 +- 8 files changed, 239 insertions(+), 176 deletions(-) create mode 100644 plugins/vpp/srplugin/vppcalls/api_vppcalls.go diff --git a/plugins/vpp/plugin_impl_vpp.go b/plugins/vpp/plugin_impl_vpp.go index 22cfdd1057..e3f5b8f8a9 100644 --- a/plugins/vpp/plugin_impl_vpp.go +++ b/plugins/vpp/plugin_impl_vpp.go @@ -22,7 +22,6 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/datasync" "github.com/ligato/cn-infra/flavors/local" - "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/messaging" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/idxvpp" @@ -44,7 +43,6 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp/model/nat" "github.com/ligato/vpp-agent/plugins/vpp/rpc" "github.com/ligato/vpp-agent/plugins/vpp/srplugin" - "github.com/ligato/vpp-agent/plugins/vpp/srplugin/vppcalls" "github.com/namsral/flag" ) @@ -573,7 +571,7 @@ func (plugin *Plugin) initL3(ctx context.Context) error { func (plugin *Plugin) initL4(ctx context.Context) error { plugin.Log.Infof("Init L4 plugin") - // Application namespace conifgurator + // Application namespace configurator plugin.appNsConfigurator = &l4plugin.AppNsConfigurator{} if err := plugin.appNsConfigurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch); err != nil { return err @@ -586,22 +584,9 @@ func (plugin *Plugin) initL4(ctx context.Context) error { func (plugin *Plugin) initSR(ctx context.Context) (err error) { plugin.Log.Infof("Init SR plugin") - // logger - srLogger := plugin.Log.NewLogger("-sr-plugin") - - var stopwatch *measure.Stopwatch - if plugin.enableStopwatch { - stopwatch = measure.NewStopwatch("SRConfigurator", srLogger) - } - // configuring configurators - plugin.srv6Configurator = &srplugin.SRv6Configurator{ - Log: srLogger, - GoVppmux: plugin.GoVppmux, - SwIfIndexes: plugin.swIfIndexes, - VppCalls: vppcalls.NewSRv6Calls(srLogger, stopwatch), - } - // Init SR plugin - if err := plugin.srv6Configurator.Init(); err != nil { + // Init SR configurator + plugin.srv6Configurator = &srplugin.SRv6Configurator{} + if err := plugin.srv6Configurator.Init(plugin.Log, plugin.GoVppmux, plugin.swIfIndexes, plugin.enableStopwatch,
nil); err != nil { return err } diff --git a/plugins/vpp/srplugin/data_resync.go b/plugins/vpp/srplugin/data_resync.go index 84e69485c1..02e1dd92d5 100644 --- a/plugins/vpp/srplugin/data_resync.go +++ b/plugins/vpp/srplugin/data_resync.go @@ -31,7 +31,7 @@ type NamedSteering struct { // Resync writes missing segment routing configs to the VPP and removes obsolete ones. func (plugin *SRv6Configurator) Resync(localSids []*srv6.LocalSID, policies []*srv6.Policy, namedSegments []*NamedPolicySegment, namedSteerings []*NamedSteering) error { - plugin.Log.Debug("RESYNC SR begin.") + plugin.log.Debug("RESYNC SR begin.") // Re-initialize cache plugin.clearMapping() @@ -40,32 +40,32 @@ func (plugin *SRv6Configurator) Resync(localSids []*srv6.LocalSID, policies []*s for _, localsid := range localSids { if err := plugin.AddLocalSID(localsid); err != nil { - plugin.Log.Error(err) + plugin.log.Error(err) continue } } for _, policy := range policies { if err := plugin.AddPolicy(policy); err != nil { - plugin.Log.Error(err) + plugin.log.Error(err) continue } } for _, namedSegment := range namedSegments { if err := plugin.AddPolicySegment(namedSegment.Name, namedSegment.Segment); err != nil { - plugin.Log.Error(err) + plugin.log.Error(err) continue } } for _, namedSteering := range namedSteerings { if err := plugin.AddSteering(namedSteering.Name, namedSteering.Steering); err != nil { - plugin.Log.Error(err) + plugin.log.Error(err) continue } } - plugin.Log.Debug("RESYNC SR end.") + plugin.log.Debug("RESYNC SR end.") return nil } diff --git a/plugins/vpp/srplugin/srv6_config.go b/plugins/vpp/srplugin/srv6_config.go index 5854c09402..73f4c184e4 100644 --- a/plugins/vpp/srplugin/srv6_config.go +++ b/plugins/vpp/srplugin/srv6_config.go @@ -24,6 +24,7 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" @@ -40,13 +41,14 @@ import ( // modelled by the proto file "../model/srv6/srv6.proto" and stored in ETCD under the key "/vnf-agent/{vnf-agent}/vpp/config/v1/srv6". type SRv6Configurator struct { // injectable/public fields - Log logging.Logger - GoVppmux govppmux.API - SwIfIndexes ifaceidx.SwIfIndex // SwIfIndexes from default plugins - VppCalls vppcalls.SRv6Calls + log logging.Logger + swIfIndexes ifaceidx.SwIfIndex // SwIfIndexes from default plugins // channels - Channel govppapi.Channel // channel to communicate with VPP + vppChan govppapi.Channel // channel to communicate with VPP + + // vpp api handler + srHandler vppcalls.SRv6VppAPI // caches policyCache *cache.PolicyCache // Cache for SRv6 policies @@ -59,43 +61,67 @@ type SRv6Configurator struct { policyIndexes idxvpp.NameToIdxRW // Mapping between policy bsid and index inside VPP policySegmentIndexSeq *gaplessSequence policySegmentIndexes idxvpp.NameToIdxRW // Mapping between policy segment name as defined in ETCD key and index inside VPP + + // stopwatch + stopwatch *measure.Stopwatch } // Init members -func (plugin *SRv6Configurator) Init() (err error) { +func (plugin *SRv6Configurator) Init(logger logging.PluginLogger, goVppMux govppmux.API, swIfIndexes ifaceidx.SwIfIndex, + enableStopwatch bool, srHandler vppcalls.SRv6VppAPI) (err error) { + // Logger + plugin.log = logger.NewLogger("-sr-plugin") + // NewAPIChannel returns a new API channel for communication with VPP via govpp core. 
// It uses default buffer sizes for the request and reply Go channels. - plugin.Channel, err = plugin.GoVppmux.NewAPIChannel() + plugin.vppChan, err = goVppMux.NewAPIChannel() if err != nil { return } + // Init stopwatch + if enableStopwatch { + plugin.stopwatch = measure.NewStopwatch("SRConfigurator", plugin.log) + } + + // VPP API handler + if srHandler != nil { + plugin.srHandler = srHandler + } else { + if plugin.srHandler, err = vppcalls.NewSRv6VppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + return err + } + } + + // Interface indexes + plugin.swIfIndexes = swIfIndexes + // Create caches - plugin.policyCache = cache.NewPolicyCache(plugin.Log) - plugin.policySegmentsCache = cache.NewPolicySegmentCache(plugin.Log) - plugin.steeringCache = cache.NewSteeringCache(plugin.Log) + plugin.policyCache = cache.NewPolicyCache(plugin.log) + plugin.policySegmentsCache = cache.NewPolicySegmentCache(plugin.log) + plugin.steeringCache = cache.NewSteeringCache(plugin.log) plugin.createdPolicies = make(map[string]struct{}) // Create indexes plugin.policySegmentIndexSeq = newSequence() - plugin.policySegmentIndexes = nametoidx.NewNameToIdx(plugin.Log, "policy-segment-indexes", nil) + plugin.policySegmentIndexes = nametoidx.NewNameToIdx(plugin.log, "policy-segment-indexes", nil) plugin.policyIndexSeq = newSequence() - plugin.policyIndexes = nametoidx.NewNameToIdx(plugin.Log, "policy-indexes", nil) + plugin.policyIndexes = nametoidx.NewNameToIdx(plugin.log, "policy-indexes", nil) return } // Close closes GOVPP channel func (plugin *SRv6Configurator) Close() error { - return safeclose.Close(plugin.Channel) + return safeclose.Close(plugin.vppChan) } // clearMapping prepares all in-memory-mappings and other cache fields. All previous cached entries are removed. 
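For illustration (not part of the patch): with the new Init signature, production wiring and tests differ only in the injected handler. A minimal sketch, assuming a plugin logger, a govppmux instance and interface indexes are at hand:

	configurator := &srplugin.SRv6Configurator{}
	// nil srHandler: Init constructs the real vppcalls handler over its own VPP channel
	if err := configurator.Init(logger, goVppMux, swIfIndexes, enableStopwatch, nil); err != nil {
		return err
	}
	// tests inject a fake instead, e.g. vppcallfake.NewSRv6Calls() as the last argument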
func (plugin *SRv6Configurator) clearMapping() { // Clear caches - plugin.policyCache = cache.NewPolicyCache(plugin.Log) - plugin.policySegmentsCache = cache.NewPolicySegmentCache(plugin.Log) - plugin.steeringCache = cache.NewSteeringCache(plugin.Log) + plugin.policyCache = cache.NewPolicyCache(plugin.log) + plugin.policySegmentsCache = cache.NewPolicySegmentCache(plugin.log) + plugin.steeringCache = cache.NewSteeringCache(plugin.log) plugin.createdPolicies = make(map[string]struct{}) // Clear indexes @@ -111,7 +137,7 @@ func (plugin *SRv6Configurator) AddLocalSID(value *srv6.LocalSID) error { if err != nil { return fmt.Errorf("sid should be valid ipv6 address: %v", err) } - return plugin.VppCalls.AddLocalSid(sid, value, plugin.SwIfIndexes, plugin.Channel) + return plugin.srHandler.AddLocalSid(sid, value, plugin.swIfIndexes) } // DeleteLocalSID removes Local SID from VPP using VPP's binary api @@ -120,7 +146,7 @@ func (plugin *SRv6Configurator) DeleteLocalSID(value *srv6.LocalSID) error { if err != nil { return fmt.Errorf("sid should be valid ipv6 address: %v", err) } - return plugin.VppCalls.DeleteLocalSid(sid, plugin.Channel) + return plugin.srHandler.DeleteLocalSid(sid) } // ModifyLocalSID modifies Local SID from to in VPP using VPP's binary api @@ -145,13 +171,13 @@ func (plugin *SRv6Configurator) AddPolicy(policy *srv6.Policy) error { plugin.policyCache.Put(bsid, policy) segments, segmentNames := plugin.policySegmentsCache.LookupByPolicy(bsid) if len(segments) == 0 { - plugin.Log.Debugf("addition of policy (%v) postponed until first policy segment is defined for it", bsid.String()) + plugin.log.Debugf("addition of policy (%v) postponed until first policy segment is defined for it", bsid.String()) return nil } plugin.addPolicyToIndexes(bsid) plugin.addSegmentToIndexes(bsid, segmentNames[0]) - err = plugin.VppCalls.AddPolicy(bsid, policy, segments[0], plugin.Channel) + err = plugin.srHandler.AddPolicy(bsid, policy, segments[0]) if err != nil { return fmt.Errorf("can't write policy (%v) with first segment (%v): %v", bsid, segments[0].Segments, err) } @@ -211,7 +237,7 @@ func (plugin *SRv6Configurator) RemovePolicy(policy *srv6.Policy) error { plugin.policySegmentIndexSeq.delete(index) } } - return plugin.VppCalls.DeletePolicy(bsid, plugin.Channel) // expecting that policy delete will also delete policy segments in vpp + return plugin.srHandler.DeletePolicy(bsid) // expecting that policy delete will also delete policy segments in vpp } // ModifyPolicy modifies policy in VPP using VPP's binary api @@ -247,7 +273,7 @@ func (plugin *SRv6Configurator) AddPolicySegment(segmentName string, policySegme plugin.policySegmentsCache.Put(bsid, segmentName, policySegment) policy, exists := plugin.policyCache.GetValue(bsid) if !exists { - plugin.Log.Debugf("addition of policy segment (%v) postponed until policy with %v bsid is created", policySegment.GetSegments(), bsid.String()) + plugin.log.Debugf("addition of policy segment (%v) postponed until policy with %v bsid is created", policySegment.GetSegments(), bsid.String()) return nil } @@ -266,7 +292,7 @@ func (plugin *SRv6Configurator) AddPolicySegment(segmentName string, policySegme } // FIXME there is no API contract saying what happens to VPP indexes if addition fails (also different fail code can rollback or not rollback indexes) => no way how to handle this without being dependent on internal implementation inside VPP and that is just very fragile -> API should tell this but it doesn't! 
plugin.addSegmentToIndexes(bsid, segmentName) - return plugin.VppCalls.AddPolicySegment(bsid, policy, policySegment, plugin.Channel) + return plugin.srHandler.AddPolicySegment(bsid, policy, policySegment) } // RemovePolicySegment removes policy segment with name from referenced policy in VPP using @@ -282,7 +308,7 @@ func (plugin *SRv6Configurator) RemovePolicySegment(segmentName string, policySe siblings, _ := plugin.policySegmentsCache.LookupByPolicy(bsid) // sibling segments in the same policy if len(siblings) == 0 { // last segment for policy - plugin.Log.Debugf("removal of policy segment (%v) postponed until policy with %v bsid is deleted", policySegment.GetSegments(), bsid.String()) + plugin.log.Debugf("removal of policy segment (%v) postponed until policy with %v bsid is deleted", policySegment.GetSegments(), bsid.String()) return nil } @@ -296,7 +322,7 @@ func (plugin *SRv6Configurator) RemovePolicySegment(segmentName string, policySe } // FIXME there is no API contract saying what happens to VPP indexes if removal fails (also different fail code can rollback or not rollback indexes) => no way how to handle this without being dependent on internal implementation inside VPP and that is just very fragile -> API should tell this but it doesn't! plugin.policySegmentIndexSeq.delete(index) - return plugin.VppCalls.DeletePolicySegment(bsid, policy, policySegment, index, plugin.Channel) + return plugin.srHandler.DeletePolicySegment(bsid, policy, policySegment, index) } // ModifyPolicySegment modifies existing policy segment with name from to in referenced policy. @@ -356,7 +382,7 @@ func (plugin *SRv6Configurator) AddSteering(name string, steering *srv6.Steering var exists bool bsidStr, _, exists = plugin.policyIndexes.LookupName(steering.PolicyIndex) if !exists { - plugin.Log.Debugf("addition of steering (index %v) postponed until referenced policy is defined", steering.PolicyIndex) + plugin.log.Debugf("addition of steering (index %v) postponed until referenced policy is defined", steering.PolicyIndex) return nil } } @@ -366,17 +392,17 @@ func (plugin *SRv6Configurator) AddSteering(name string, steering *srv6.Steering return fmt.Errorf("can't parse policy BSID string ('%v') into IPv6 address", steering.PolicyBsid) } if _, exists := plugin.policyCache.GetValue(bsid); !exists { - plugin.Log.Debugf("addition of steering (bsid %v) postponed until referenced policy is defined", name) + plugin.log.Debugf("addition of steering (bsid %v) postponed until referenced policy is defined", name) return nil } - return plugin.VppCalls.AddSteering(steering, plugin.SwIfIndexes, plugin.Channel) + return plugin.srHandler.AddSteering(steering, plugin.swIfIndexes) } // RemoveSteering removes steering from VPP using VPP's binary api func (plugin *SRv6Configurator) RemoveSteering(name string, steering *srv6.Steering) error { plugin.steeringCache.Delete(name) - return plugin.VppCalls.RemoveSteering(steering, plugin.SwIfIndexes, plugin.Channel) + return plugin.srHandler.RemoveSteering(steering, plugin.swIfIndexes) } // ModifySteering modifies existing steering in VPP using VPP's binary api diff --git a/plugins/vpp/srplugin/srv6_config_test.go b/plugins/vpp/srplugin/srv6_config_test.go index 5a04c115a7..a85c1ce4e8 100644 --- a/plugins/vpp/srplugin/srv6_config_test.go +++ b/plugins/vpp/srplugin/srv6_config_test.go @@ -912,13 +912,8 @@ func srv6TestSetup(t *testing.T) (*srplugin.SRv6Configurator, *vppcallfake.SRv6C swIndex := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "sw_if_indexes", ifaceidx.IndexMetadata)) 
diff --git a/plugins/vpp/srplugin/srv6_config_test.go b/plugins/vpp/srplugin/srv6_config_test.go
index 5a04c115a7..a85c1ce4e8 100644
--- a/plugins/vpp/srplugin/srv6_config_test.go
+++ b/plugins/vpp/srplugin/srv6_config_test.go
@@ -912,13 +912,8 @@ func srv6TestSetup(t *testing.T) (*srplugin.SRv6Configurator, *vppcallfake.SRv6C
 	swIndex := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "sw_if_indexes", ifaceidx.IndexMetadata))
 	// Configurator
 	fakeVPPCalls := vppcallfake.NewSRv6Calls()
-	configurator := &srplugin.SRv6Configurator{
-		Log:         log,
-		GoVppmux:    connection,
-		SwIfIndexes: swIndex,
-		VppCalls:    fakeVPPCalls,
-	}
-	err = configurator.Init()
+	configurator := &srplugin.SRv6Configurator{}
+	err = configurator.Init(log, connection, swIndex, false, fakeVPPCalls)
 	Expect(err).To(BeNil())
 
 	return configurator, fakeVPPCalls, connection
diff --git a/plugins/vpp/srplugin/vppcalls/api_vppcalls.go b/plugins/vpp/srplugin/vppcalls/api_vppcalls.go
new file mode 100644
index 0000000000..48062c35b8
--- /dev/null
+++ b/plugins/vpp/srplugin/vppcalls/api_vppcalls.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vppcalls
+
+import (
+	"net"
+
+	govppapi "git.fd.io/govpp.git/api"
+	"github.com/ligato/cn-infra/logging"
+	"github.com/ligato/cn-infra/logging/measure"
+	"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx"
+	"github.com/ligato/vpp-agent/plugins/vpp/model/srv6"
+)
+
+// SRv6VppAPI is the API boundary for vppcalls package access, introduced to properly test code dependent on the vppcalls package
+type SRv6VppAPI interface {
+	SRv6VPPWrite
+	SRv6VPPRead
+}
+
+// SRv6VPPWrite provides write methods for segment routing
+type SRv6VPPWrite interface {
+	// AddLocalSid adds local sid given by <sidAddr> and <localSID> into VPP
+	AddLocalSid(sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex) error
+	// DeleteLocalSid deletes local sid given by <sidAddr> in VPP
+	DeleteLocalSid(sidAddr net.IP) error
+	// SetEncapsSourceAddress sets for SRv6 in VPP the source address used for encapsulated packet
+	SetEncapsSourceAddress(address string) error
+	// AddPolicy adds SRv6 policy <policy> identified by binding SID <bindingSid>, with initial segment <policySegment> and other policy settings taken from <policy>
+	AddPolicy(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment) error
+	// DeletePolicy deletes SRv6 policy given by binding SID <bindingSid>
+	DeletePolicy(bindingSid net.IP) error
+	// AddPolicySegment adds segment <policySegment> to the SRv6 policy <policy> that has policy BSID <bindingSid>
+	AddPolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment) error
+	// DeletePolicySegment removes segment <policySegment> (with segment index <segmentIndex>) from the SRv6 policy <policy> that has policy BSID <bindingSid>
+	DeletePolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, segmentIndex uint32) error
+	// AddSteering sets in VPP steering into SRv6 policy.
+	AddSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex) error
+	// RemoveSteering removes in VPP steering into SRv6 policy.
+	RemoveSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex) error
+}
+
+// SRv6VPPRead provides read methods for segment routing
+type SRv6VPPRead interface {
+	// TODO: implement dump methods
+}
+
+// srv6VppHandler is accessor for SRv6-related vppcalls methods
+type srv6VppHandler struct {
+	log          logging.Logger
+	callsChannel govppapi.Channel
+	stopwatch    *measure.Stopwatch
+}
+
+// NewSRv6VppHandler creates a new instance of the SRv6 vppcalls handler
+func NewSRv6VppHandler(vppChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*srv6VppHandler, error) {
+	handler := &srv6VppHandler{
+		callsChannel: vppChan,
+		log:          log,
+		stopwatch:    stopwatch,
+	}
+	if err := handler.callsChannel.CheckMessageCompatibility(SrMessages...); err != nil {
+		return nil, err
+	}
+
+	return handler, nil
+}
diff --git a/plugins/vpp/srplugin/vppcalls/srv6.go b/plugins/vpp/srplugin/vppcalls/srv6.go
index ba4d5d955d..0d139ebea5 100644
--- a/plugins/vpp/srplugin/vppcalls/srv6.go
+++ b/plugins/vpp/srplugin/vppcalls/srv6.go
@@ -22,12 +22,27 @@ import (
 
 	govppapi "git.fd.io/govpp.git/api"
 	"github.com/ligato/cn-infra/logging"
-	"github.com/ligato/cn-infra/logging/measure"
 	"github.com/ligato/vpp-agent/plugins/vpp/binapi/sr"
 	"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx"
 	"github.com/ligato/vpp-agent/plugins/vpp/model/srv6"
 )
 
+// SrMessages is the list of segment-routing binary API messages whose CRCs are checked against the running VPP for compatibility.
+var SrMessages = []govppapi.Message{
+	&sr.SrLocalsidAddDel{},
+	&sr.SrLocalsidAddDelReply{},
+	&sr.SrSetEncapSource{},
+	&sr.SrSetEncapSourceReply{},
+	&sr.SrPolicyAdd{},
+	&sr.SrPolicyAddReply{},
+	&sr.SrPolicyMod{},
+	&sr.SrPolicyModReply{},
+	&sr.SrPolicyDel{},
+	&sr.SrPolicyDelReply{},
+	&sr.SrSteeringAddDel{},
+	&sr.SrSteeringAddDelReply{},
+}
+
 // Constants for behavior function hardcoded into VPP (there can be also custom behavior functions implemented as VPP plugins)
 // Constants are taken from VPP's vnet/srv6/sr.h (names are modified to Golang from original C form in VPP code)
 const (
@@ -58,56 +73,21 @@ const (
 	ModifyWeightOfSRList // Modify the weight of an existing SR List
 )
 
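Note: NewSRv6VppHandler above binds the channel once and calls CheckMessageCompatibility against SrMessages, so a mismatch between the generated sr bindings and the connected VPP surfaces as a constructor error instead of a failure on the first request. A usage sketch, assuming a channel obtained from the govppmux plugin; the goVppMux variable and the error handling are illustrative, not part of this patch:

	ch, err := goVppMux.NewAPIChannel()
	if err != nil {
		return err
	}
	// A nil stopwatch is accepted (the tests in this patch pass nil),
	// which effectively disables per-message timing.
	srHandler, err := vppcalls.NewSRv6VppHandler(ch, log, nil)
	if err != nil {
		// CheckMessageCompatibility failed: the SR message CRCs of this
		// VPP differ from the generated bindings in plugins/vpp/binapi/sr.
		return err
	}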
-// SRv6Calls is API boundary for vppcall package access, introduced to properly test code dependent on vppcalls package
-type SRv6Calls interface {
-	// AddLocalSid adds local sid given by <sidAddr> and <localSID> into VPP
-	AddLocalSid(sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error
-	// DeleteLocalSid deletes local sid given by <sidAddr> in VPP
-	DeleteLocalSid(sidAddr net.IP, vppChan govppapi.Channel) error
-	// SetEncapsSourceAddress sets for SRv6 in VPP the source address used for encapsulated packet
-	SetEncapsSourceAddress(address string, vppChan govppapi.Channel) error
-	// AddPolicy adds SRv6 policy <policy> identified by binding SID <bindingSid>, with initial segment <policySegment> and other policy settings taken from <policy>
-	AddPolicy(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, vppChan govppapi.Channel) error
-	// DeletePolicy deletes SRv6 policy given by binding SID <bindingSid>
-	DeletePolicy(bindingSid net.IP, vppChan govppapi.Channel) error
-	// AddPolicySegment adds segment <policySegment> to the SRv6 policy <policy> that has policy BSID <bindingSid>
-	AddPolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, vppChan govppapi.Channel) error
-	// DeletePolicySegment removes segment <policySegment> (with segment index <segmentIndex>) from the SRv6 policy <policy> that has policy BSID <bindingSid>
-	DeletePolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, segmentIndex uint32, vppChan govppapi.Channel) error
-	// AddSteering sets in VPP steering into SRv6 policy.
-	AddSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error
-	// RemoveSteering removes in VPP steering into SRv6 policy.
-	RemoveSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error
-}
-
-type srv6Calls struct {
-	log       logging.Logger
-	stopwatch *measure.Stopwatch
-}
-
-// NewSRv6Calls creates implementation of SRv6Calls interface
-func NewSRv6Calls(log logging.Logger, stopwatch *measure.Stopwatch) SRv6Calls {
-	return &srv6Calls{
-		log:       log,
-		stopwatch: stopwatch,
-	}
-}
-
 // AddLocalSid adds local sid given by <sidAddr> and <localSID> into VPP
-func (calls *srv6Calls) AddLocalSid(sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error {
-	return calls.addDelLocalSid(false, sidAddr, localSID, swIfIndex, vppChan)
+func (handler *srv6VppHandler) AddLocalSid(sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex) error {
+	return handler.addDelLocalSid(false, sidAddr, localSID, swIfIndex)
 }
 
 // DeleteLocalSid deletes local sid given by <sidAddr> in VPP
-func (calls *srv6Calls) DeleteLocalSid(sidAddr net.IP, vppChan govppapi.Channel) error {
-	return calls.addDelLocalSid(true, sidAddr, nil, nil, vppChan)
+func (handler *srv6VppHandler) DeleteLocalSid(sidAddr net.IP) error {
+	return handler.addDelLocalSid(true, sidAddr, nil, nil)
 }
 
-func (calls *srv6Calls) addDelLocalSid(deletion bool, sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error {
-	calls.log.WithFields(logging.Fields{"localSID": sidAddr, "delete": deletion, "FIB table ID": calls.fibTableID(localSID), "end function": calls.endFunction(localSID)}).
+func (handler *srv6VppHandler) addDelLocalSid(deletion bool, sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex) error {
+	handler.log.WithFields(logging.Fields{"localSID": sidAddr, "delete": deletion, "FIB table ID": handler.fibTableID(localSID), "end function": handler.endFunction(localSID)}).
 		Debug("Adding/deleting Local SID", sidAddr)
 	defer func(t time.Time) {
-		calls.stopwatch.TimeLog(sr.SrLocalsidAddDel{}).LogTimeEntry(time.Since(t))
+		handler.stopwatch.TimeLog(sr.SrLocalsidAddDel{}).LogTimeEntry(time.Since(t))
 	}(time.Now())
 
 	req := &sr.SrLocalsidAddDel{
@@ -116,33 +96,33 @@ func (calls *srv6Calls) addDelLocalSid(deletion bool, sidAddr net.IP, localSID *
 	}
 	if !deletion {
 		req.FibTable = localSID.FibTableId // where to install localsid entry
-		if err := calls.writeEndFunction(req, sidAddr, localSID, swIfIndex); err != nil {
+		if err := handler.writeEndFunction(req, sidAddr, localSID, swIfIndex); err != nil {
 			return err
 		}
 	}
 	reply := &sr.SrLocalsidAddDelReply{}
 
-	if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil {
+	if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil {
 		return err
 	}
 	if reply.Retval != 0 {
 		return fmt.Errorf("vpp call %q returned: %d", reply.GetMessageName(), reply.Retval)
 	}
 
-	calls.log.WithFields(logging.Fields{"localSID": sidAddr, "delete": deletion, "FIB table ID": calls.fibTableID(localSID), "end function": calls.endFunction(localSID)}).
+	handler.log.WithFields(logging.Fields{"localSID": sidAddr, "delete": deletion, "FIB table ID": handler.fibTableID(localSID), "end function": handler.endFunction(localSID)}).
Debug("Added/deleted Local SID ", sidAddr) return nil } -func (calls *srv6Calls) fibTableID(localSID *srv6.LocalSID) string { +func (handler *srv6VppHandler) fibTableID(localSID *srv6.LocalSID) string { if localSID != nil { return string(localSID.FibTableId) } return "" } -func (calls *srv6Calls) endFunction(localSID *srv6.LocalSID) string { +func (handler *srv6VppHandler) endFunction(localSID *srv6.LocalSID) string { if localSID == nil { return "" } else if localSID.BaseEndFunction != nil { @@ -165,7 +145,7 @@ func (calls *srv6Calls) endFunction(localSID *srv6.LocalSID) string { return "unknown end function" } -func (calls *srv6Calls) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex) error { +func (handler *srv6VppHandler) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex) error { if localSID.BaseEndFunction != nil { req.Behavior = BehaviorEnd req.EndPsp = boolToUint(localSID.BaseEndFunction.Psp) @@ -245,10 +225,10 @@ func (calls *srv6Calls) writeEndFunction(req *sr.SrLocalsidAddDel, sidAddr net.I } // SetEncapsSourceAddress sets for SRv6 in VPP the source address used for encapsulated packet -func (calls *srv6Calls) SetEncapsSourceAddress(address string, vppChan govppapi.Channel) error { - calls.log.Debugf("Configuring encapsulation source address to address %v", address) +func (handler *srv6VppHandler) SetEncapsSourceAddress(address string) error { + handler.log.Debugf("Configuring encapsulation source address to address %v", address) defer func(t time.Time) { - calls.stopwatch.TimeLog(sr.SrSetEncapSource{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(sr.SrSetEncapSource{}).LogTimeEntry(time.Since(t)) }(time.Now()) ipAddress, err := parseIPv6(address) @@ -260,27 +240,27 @@ func (calls *srv6Calls) SetEncapsSourceAddress(address string, vppChan govppapi. } reply := &sr.SrSetEncapSourceReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("vpp call %q returned: %d", reply.GetMessageName(), reply.Retval) } - calls.log.WithFields(logging.Fields{"Encapsulation source address": address}). + handler.log.WithFields(logging.Fields{"Encapsulation source address": address}). 
Debug("Encapsulation source address configured.") return nil } // AddPolicy adds SRv6 policy given by identified ,initial segment for policy and other policy settings in -func (calls *srv6Calls) AddPolicy(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, vppChan govppapi.Channel) error { - calls.log.Debugf("Adding SR policy with binding SID %v and list of next SIDs %v", bindingSid, policySegment.Segments) +func (handler *srv6VppHandler) AddPolicy(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment) error { + handler.log.Debugf("Adding SR policy with binding SID %v and list of next SIDs %v", bindingSid, policySegment.Segments) defer func(t time.Time) { - calls.stopwatch.TimeLog(sr.SrPolicyAdd{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(sr.SrPolicyAdd{}).LogTimeEntry(time.Since(t)) }(time.Now()) - sids, err := calls.convertPolicySegment(policySegment) + sids, err := handler.convertPolicySegment(policySegment) if err != nil { return err } @@ -294,24 +274,24 @@ func (calls *srv6Calls) AddPolicy(bindingSid net.IP, policy *srv6.Policy, policy } reply := &sr.SrPolicyAddReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("vpp call %q returned: %d", reply.GetMessageName(), reply.Retval) } - calls.log.WithFields(logging.Fields{"binding SID": bindingSid, "list of next SIDs": policySegment.Segments}). + handler.log.WithFields(logging.Fields{"binding SID": bindingSid, "list of next SIDs": policySegment.Segments}). Debug("SR policy added") return nil } // DeletePolicy deletes SRv6 policy given by binding SID -func (calls *srv6Calls) DeletePolicy(bindingSid net.IP, vppChan govppapi.Channel) error { - calls.log.Debugf("Deleting SR policy with binding SID %v ", bindingSid) +func (handler *srv6VppHandler) DeletePolicy(bindingSid net.IP) error { + handler.log.Debugf("Deleting SR policy with binding SID %v ", bindingSid) defer func(t time.Time) { - calls.stopwatch.TimeLog(sr.SrPolicyDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(sr.SrPolicyDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) req := &sr.SrPolicyDel{ @@ -319,49 +299,49 @@ func (calls *srv6Calls) DeletePolicy(bindingSid net.IP, vppChan govppapi.Channel } reply := &sr.SrPolicyDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("vpp call %q returned: %d", reply.GetMessageName(), reply.Retval) } - calls.log.WithFields(logging.Fields{"binding SID": bindingSid}). + handler.log.WithFields(logging.Fields{"binding SID": bindingSid}). 
Debug("SR policy deleted") return nil } // AddPolicySegment adds segment to SRv6 policy that has policy BSID -func (calls *srv6Calls) AddPolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, vppChan govppapi.Channel) error { - calls.log.Debugf("Adding segment %v to SR policy with binding SID %v", policySegment.Segments, bindingSid) - err := calls.modPolicy(AddSRList, bindingSid, policy, policySegment, 0, vppChan) +func (handler *srv6VppHandler) AddPolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment) error { + handler.log.Debugf("Adding segment %v to SR policy with binding SID %v", policySegment.Segments, bindingSid) + err := handler.modPolicy(AddSRList, bindingSid, policy, policySegment, 0) if err == nil { - calls.log.WithFields(logging.Fields{"binding SID": bindingSid, "list of next SIDs": policySegment.Segments}). + handler.log.WithFields(logging.Fields{"binding SID": bindingSid, "list of next SIDs": policySegment.Segments}). Debug("SR policy modified(added another segment list)") } return err } // DeletePolicySegment removes segment (with segment index ) from SRv6 policy that has policy BSID -func (calls *srv6Calls) DeletePolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, - segmentIndex uint32, vppChan govppapi.Channel) error { - calls.log.Debugf("Removing segment %v (index %v) from SR policy with binding SID %v", policySegment.Segments, segmentIndex, bindingSid) - err := calls.modPolicy(DeleteSRList, bindingSid, policy, policySegment, segmentIndex, vppChan) +func (handler *srv6VppHandler) DeletePolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, + segmentIndex uint32) error { + handler.log.Debugf("Removing segment %v (index %v) from SR policy with binding SID %v", policySegment.Segments, segmentIndex, bindingSid) + err := handler.modPolicy(DeleteSRList, bindingSid, policy, policySegment, segmentIndex) if err == nil { - calls.log.WithFields(logging.Fields{"binding SID": bindingSid, "list of next SIDs": policySegment.Segments, "segmentIndex": segmentIndex}). + handler.log.WithFields(logging.Fields{"binding SID": bindingSid, "list of next SIDs": policySegment.Segments, "segmentIndex": segmentIndex}). 
Debug("SR policy modified(removed segment list)") } return err } -func (calls *srv6Calls) modPolicy(operation uint8, bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, - segmentIndex uint32, vppChan govppapi.Channel) error { +func (handler *srv6VppHandler) modPolicy(operation uint8, bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, + segmentIndex uint32) error { defer func(t time.Time) { - calls.stopwatch.TimeLog(sr.SrPolicyMod{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(sr.SrPolicyMod{}).LogTimeEntry(time.Since(t)) }(time.Now()) - sids, err := calls.convertPolicySegment(policySegment) + sids, err := handler.convertPolicySegment(policySegment) if err != nil { return err } @@ -378,7 +358,7 @@ func (calls *srv6Calls) modPolicy(operation uint8, bindingSid net.IP, policy *sr reply := &sr.SrPolicyModReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { @@ -387,7 +367,7 @@ func (calls *srv6Calls) modPolicy(operation uint8, bindingSid net.IP, policy *sr return nil } -func (calls *srv6Calls) convertPolicySegment(policySegment *srv6.PolicySegment) (*sr.Srv6SidList, error) { +func (handler *srv6VppHandler) convertPolicySegment(policySegment *srv6.PolicySegment) (*sr.Srv6SidList, error) { var segments []sr.Srv6Sid for _, sid := range policySegment.Segments { // parse to IPv6 address @@ -411,20 +391,18 @@ func (calls *srv6Calls) convertPolicySegment(policySegment *srv6.PolicySegment) } // AddSteering sets in VPP steering into SRv6 policy. -func (calls *srv6Calls) AddSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex, - vppChan govppapi.Channel) error { - return calls.addDelSteering(false, steering, swIfIndex, vppChan) +func (handler *srv6VppHandler) AddSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex) error { + return handler.addDelSteering(false, steering, swIfIndex) } // RemoveSteering removes in VPP steering into SRv6 policy. 
-func (calls *srv6Calls) RemoveSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex, - vppChan govppapi.Channel) error { - return calls.addDelSteering(true, steering, swIfIndex, vppChan) +func (handler *srv6VppHandler) RemoveSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex) error { + return handler.addDelSteering(true, steering, swIfIndex) } -func (calls *srv6Calls) addDelSteering(delete bool, steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error { +func (handler *srv6VppHandler) addDelSteering(delete bool, steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex) error { defer func(t time.Time) { - calls.stopwatch.TimeLog(sr.SrSteeringAddDel{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(sr.SrSteeringAddDel{}).LogTimeEntry(time.Since(t)) }(time.Now()) // defining operation strings for logging @@ -435,10 +413,10 @@ func (calls *srv6Calls) addDelSteering(delete bool, steering *srv6.Steering, swI // logging info about operation with steering if steering.L3Traffic != nil { - calls.log.Debugf("%v steering for l3 traffic with destination %v to SR policy (binding SID %v, policy index %v)", + handler.log.Debugf("%v steering for l3 traffic with destination %v to SR policy (binding SID %v, policy index %v)", operationProgressing, steering.L3Traffic.PrefixAddress, steering.PolicyBsid, steering.PolicyIndex) } else { - calls.log.Debugf("%v steering for l2 traffic from interface %v to SR policy (binding SID %v, policy index %v)", + handler.log.Debugf("%v steering for l2 traffic from interface %v to SR policy (binding SID %v, policy index %v)", operationProgressing, steering.L2Traffic.InterfaceName, steering.PolicyBsid, steering.PolicyIndex) } @@ -490,14 +468,14 @@ func (calls *srv6Calls) addDelSteering(delete bool, steering *srv6.Steering, swI } reply := &sr.SrSteeringAddDelReply{} - if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + if err := handler.callsChannel.SendRequest(req).ReceiveReply(reply); err != nil { return err } if reply.Retval != 0 { return fmt.Errorf("vpp call %q returned: %d", reply.GetMessageName(), reply.Retval) } - calls.log.WithFields(logging.Fields{"steer type": steerType, "L3 prefix address bytes": prefixAddr, + handler.log.WithFields(logging.Fields{"steer type": steerType, "L3 prefix address bytes": prefixAddr, "L2 interface index": intIndex, "policy binding SID": bsidAddr, "policy index": steering.PolicyIndex}). 
Debugf("%v steering to SR policy ", operationFinished) diff --git a/plugins/vpp/srplugin/vppcalls/srv6_test.go b/plugins/vpp/srplugin/vppcalls/srv6_test.go index da2d0ddcc5..197df900f1 100644 --- a/plugins/vpp/srplugin/vppcalls/srv6_test.go +++ b/plugins/vpp/srplugin/vppcalls/srv6_test.go @@ -379,7 +379,7 @@ func TestAddLocalSID(t *testing.T) { ctx.MockVpp.MockReply(&sr.SrLocalsidAddDelReply{}) } // make the call - err := vppCalls.AddLocalSid(sidA.Addr, td.Input, swIfIndex, ctx.MockChannel) + err := vppCalls.AddLocalSid(sidA.Addr, td.Input, swIfIndex) // verify result if td.ExpectFailure { Expect(err).Should(HaveOccurred()) @@ -435,10 +435,10 @@ func TestDeleteLocalSID(t *testing.T) { Psp: true, }, } - vppCalls.AddLocalSid(td.Sid, localsid, swIfIndex, ctx.MockChannel) + vppCalls.AddLocalSid(td.Sid, localsid, swIfIndex) ctx.MockVpp.MockReply(td.MockReply) // make the call and verify - err := vppCalls.DeleteLocalSid(td.Sid, ctx.MockChannel) + err := vppCalls.DeleteLocalSid(td.Sid) td.Verify(err, ctx.MockChannel.Msg) }) } @@ -490,7 +490,7 @@ func TestSetEncapsSourceAddress(t *testing.T) { defer teardown(ctx) ctx.MockVpp.MockReply(td.MockReply) - err := vppCalls.SetEncapsSourceAddress(td.Address, ctx.MockChannel) + err := vppCalls.SetEncapsSourceAddress(td.Address) td.Verify(err, ctx.MockChannel.Msg) }) } @@ -561,7 +561,7 @@ func TestAddPolicy(t *testing.T) { defer teardown(ctx) // prepare reply, make call and verify ctx.MockVpp.MockReply(td.MockReply) - err := vppCalls.AddPolicy(td.BSID, td.Policy, td.PolicySegment, ctx.MockChannel) + err := vppCalls.AddPolicy(td.BSID, td.Policy, td.PolicySegment) td.Verify(err, ctx.MockChannel.Msg) }) } @@ -605,10 +605,10 @@ func TestDeletePolicy(t *testing.T) { // data and prepare case policy := policy(0, true, true) segment := policySegment(1, sidA.Addr, sidB.Addr, sidC.Addr) - vppCalls.AddPolicy(td.BSID, policy, segment, ctx.MockChannel) + vppCalls.AddPolicy(td.BSID, policy, segment) ctx.MockVpp.MockReply(td.MockReply) // make the call and verify - err := vppCalls.DeletePolicy(td.BSID, ctx.MockChannel) + err := vppCalls.DeletePolicy(td.BSID) td.Verify(err, ctx.MockChannel.Msg) }) } @@ -677,7 +677,7 @@ func TestAddPolicySegment(t *testing.T) { defer teardown(ctx) // prepare reply, make call and verify ctx.MockVpp.MockReply(td.MockReply) - err := vppCalls.AddPolicySegment(td.BSID, td.Policy, td.PolicySegment, ctx.MockChannel) + err := vppCalls.AddPolicySegment(td.BSID, td.Policy, td.PolicySegment) td.Verify(err, ctx.MockChannel.Msg) }) } @@ -751,7 +751,7 @@ func TestDeletePolicySegment(t *testing.T) { defer teardown(ctx) // prepare reply, make call and verify ctx.MockVpp.MockReply(td.MockReply) - err := vppCalls.DeletePolicySegment(td.BSID, td.Policy, td.PolicySegment, td.SegmentIndex, ctx.MockChannel) + err := vppCalls.DeletePolicySegment(td.BSID, td.Policy, td.PolicySegment, td.SegmentIndex) td.Verify(err, ctx.MockChannel.Msg) }) } @@ -908,18 +908,19 @@ func testAddRemoveSteering(t *testing.T, removal bool) { ctx.MockVpp.MockReply(td.MockReply) var err error if removal { - err = vppCalls.RemoveSteering(td.Steering, swIfIndex, ctx.MockChannel) + err = vppCalls.RemoveSteering(td.Steering, swIfIndex) } else { - err = vppCalls.AddSteering(td.Steering, swIfIndex, ctx.MockChannel) + err = vppCalls.AddSteering(td.Steering, swIfIndex) } td.Verify(err, ctx.MockChannel.Msg) }) } } -func setup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.SRv6Calls) { +func setup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.SRv6VppAPI) { ctx := vppcallmock.SetupTestCtx(t) - 
vppCalls := vppcalls.NewSRv6Calls(logrus.DefaultLogger(), nil)
+	vppCalls, err := vppcalls.NewSRv6VppHandler(ctx.MockChannel, logrus.DefaultLogger(), nil)
+	Expect(err).To(BeNil())
 
 	return ctx, vppCalls
 }
diff --git a/tests/vppcallfake/srv6calls.go b/tests/vppcallfake/srv6calls.go
index e2906ff3ae..5d4da2a54f 100644
--- a/tests/vppcallfake/srv6calls.go
+++ b/tests/vppcallfake/srv6calls.go
@@ -20,7 +20,6 @@ import (
 	"net"
 	"strings"
 
-	govppapi "git.fd.io/govpp.git/api"
 	"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx"
 	"github.com/ligato/vpp-agent/plugins/vpp/model/srv6"
 )
@@ -111,7 +110,7 @@ type AddSteeringFuncCall struct{}
 type RemoveSteeringFuncCall struct{}
 
 // AddLocalSid adds local sid given by <sidAddr> and <localSID> into VPP
-func (fake *SRv6Calls) AddLocalSid(sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error {
+func (fake *SRv6Calls) AddLocalSid(sidAddr net.IP, localSID *srv6.LocalSID, swIfIndex ifaceidx.SwIfIndex) error {
 	if _, ok := fake.failCall.(AddLocalSidFuncCall); ok {
 		return fake.failError
 	}
@@ -120,7 +119,7 @@ func (fake *SRv6Calls) AddLocalSid(sidAddr net.IP, localSID *srv6.LocalSID, swIf
 }
 
 // DeleteLocalSid deletes local sid given by <sidAddr> in VPP
-func (fake *SRv6Calls) DeleteLocalSid(sidAddr net.IP, vppChan govppapi.Channel) error {
+func (fake *SRv6Calls) DeleteLocalSid(sidAddr net.IP) error {
 	if _, ok := fake.failCall.(DeleteLocalSidFuncCall); ok {
 		return fake.failError
 	}
@@ -129,7 +128,7 @@ func (fake *SRv6Calls) DeleteLocalSid(sidAddr net.IP, vppChan govppapi.Channel)
 }
 
 // SetEncapsSourceAddress sets for SRv6 in VPP the source address used for encapsulated packet
-func (fake *SRv6Calls) SetEncapsSourceAddress(address string, vppChan govppapi.Channel) error {
+func (fake *SRv6Calls) SetEncapsSourceAddress(address string) error {
 	if _, ok := fake.failCall.(SetEncapsSourceAddressFuncCall); ok {
 		return fake.failError
 	}
@@ -137,7 +136,7 @@ func (fake *SRv6Calls) SetEncapsSourceAddress(address string, vppChan govppapi.C
 }
 
 // AddPolicy adds SRv6 policy <policy> identified by binding SID <bindingSid>, with initial segment <policySegment> and other policy settings taken from <policy>
-func (fake *SRv6Calls) AddPolicy(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, vppChan govppapi.Channel) error {
+func (fake *SRv6Calls) AddPolicy(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment) error {
 	if _, ok := fake.failCall.(AddPolicyFuncCall); ok {
 		return fake.failError
 	}
@@ -151,7 +150,7 @@ func (fake *SRv6Calls) AddPolicy(bindingSid net.IP, policy *srv6.Policy, policyS
 }
 
 // DeletePolicy deletes SRv6 policy given by binding SID <bindingSid>
-func (fake *SRv6Calls) DeletePolicy(bindingSid net.IP, vppChan govppapi.Channel) error {
+func (fake *SRv6Calls) DeletePolicy(bindingSid net.IP) error {
 	if _, ok := fake.failCall.(DeletePolicyFuncCall); ok {
 		return fake.failError
 	}
@@ -165,7 +164,7 @@ func (fake *SRv6Calls) DeletePolicy(bindingSid net.IP, vppChan govppapi.Channel)
 }
 
 // AddPolicySegment adds segment <policySegment> to the SRv6 policy <policy> that has policy BSID <bindingSid>
-func (fake *SRv6Calls) AddPolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, vppChan govppapi.Channel) error {
+func (fake *SRv6Calls) AddPolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment) error {
 	if _, ok := fake.failCall.(AddPolicySegmentFuncCall); ok {
 		return fake.failError
 	}
@@ -180,7 +179,7 @@ func (fake *SRv6Calls) AddPolicySegment(bindingSid net.IP, policy *srv6.Policy, 
 
 // DeletePolicySegment removes segment <policySegment> (with segment index <segmentIndex>) from the SRv6 policy <policy> that
has policy BSID func (fake *SRv6Calls) DeletePolicySegment(bindingSid net.IP, policy *srv6.Policy, policySegment *srv6.PolicySegment, - segmentIndex uint32, vppChan govppapi.Channel) error { + segmentIndex uint32) error { if _, ok := fake.failCall.(DeletePolicySegmentFuncCall); ok { return fake.failError } @@ -204,7 +203,7 @@ func removeSegment(segments []*srv6.PolicySegment, segment *srv6.PolicySegment) } // AddSteering sets in VPP steering into SRv6 policy. -func (fake *SRv6Calls) AddSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error { +func (fake *SRv6Calls) AddSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex) error { if _, ok := fake.failCall.(AddSteeringFuncCall); ok { return fake.failError } @@ -224,7 +223,7 @@ func (fake *SRv6Calls) AddSteering(steering *srv6.Steering, swIfIndex ifaceidx.S } // RemoveSteering removes in VPP steering into SRv6 policy. -func (fake *SRv6Calls) RemoveSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex, vppChan govppapi.Channel) error { +func (fake *SRv6Calls) RemoveSteering(steering *srv6.Steering, swIfIndex ifaceidx.SwIfIndex) error { if _, ok := fake.failCall.(RemoveSteeringFuncCall); ok { return fake.failError } From 5810dcceb8170ce48be8b55d225c7a8909fda0bd Mon Sep 17 00:00:00 2001 From: Marcel Sestak Date: Tue, 24 Jul 2018 13:23:30 +0200 Subject: [PATCH 041/174] fix test Signed-off-by: Marcel Sestak --- tests/robot/libraries/setup-teardown.robot | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/robot/libraries/setup-teardown.robot b/tests/robot/libraries/setup-teardown.robot index efd4a1af35..cc78b96b20 100644 --- a/tests/robot/libraries/setup-teardown.robot +++ b/tests/robot/libraries/setup-teardown.robot @@ -69,6 +69,7 @@ Discard old results Remove Directory ${RESULTS_FOLDER_SUITE} recursive=true Create Directory ${RESULTS_FOLDER} + Log All SSH Outputs [Documentation] *Log All SSH Outputs* ... 
Logs all connections outputs From 5f7943b12133699989829e4201d0f62c5bf445a8 Mon Sep 17 00:00:00 2001 From: Ondrej Fabry Date: Tue, 24 Jul 2018 14:16:10 +0200 Subject: [PATCH 042/174] Add options for essential plugins to make base of vpp-agent work Signed-off-by: Ondrej Fabry --- Gopkg.lock | 178 +- Gopkg.toml | 2 +- clientv1/linux/localclient/localclient_api.go | 7 +- clientv1/vpp/localclient/localclient_api.go | 7 +- cmd/vpp-agent/main.go | 18 +- cmd/vpp-agent/vpp_agent.go | 93 + flavors/local/local_flavor.go | 14 +- flavors/vpp/vpp_flavor.go | 28 +- idxvpp/api.go | 3 +- .../nametoidx/inmemory_name_to_idx_mapping.go | 6 +- idxvpp/persist/persistent_name_mapping.go | 3 +- plugins/govppmux/options.go | 57 + plugins/govppmux/plugin_impl_govppmux.go | 9 +- plugins/linux/data_resync.go | 2 +- .../linux/ifplugin/ifaceidx/linux_if_index.go | 5 +- .../l3plugin/l3idx/linux_l3_arp_index.go | 5 +- .../l3plugin/l3idx/linux_l3_route_index.go | 5 +- plugins/linux/linuxplugin_init.go | 19 +- plugins/linux/options.go | 57 + plugins/rest/plugin_impl_rest.go | 4 +- plugins/telemetry/config.go | 2 +- plugins/telemetry/telemetry.go | 6 +- plugins/vpp/aclplugin/aclidx/aclidx.go | 5 +- plugins/vpp/data_resync.go | 10 +- plugins/vpp/ifplugin/ifaceidx/dhcp_index.go | 5 +- plugins/vpp/ifplugin/ifaceidx/sw_if_index.go | 5 +- plugins/vpp/ifplugin/interface_state.go | 6 +- plugins/vpp/l2plugin/bd_state.go | 3 +- plugins/vpp/l2plugin/l2idx/bd_index.go | 5 +- plugins/vpp/l2plugin/l2idx/fib_index.go | 5 +- plugins/vpp/l2plugin/l2idx/xc_index.go | 5 +- plugins/vpp/l3plugin/l3idx/l3_arp_index.go | 5 +- plugins/vpp/l4plugin/nsidx/ns_index.go | 5 +- plugins/vpp/options.go | 57 + plugins/vpp/plugin_impl_vpp.go | 15 +- plugins/vpp/rpc/services.go | 4 +- plugins/vpp/watch_events.go | 1 + vendor/github.com/Shopify/sarama/.gitignore | 26 - vendor/github.com/Shopify/sarama/.travis.yml | 37 - vendor/github.com/Shopify/sarama/CHANGELOG.md | 461 ---- vendor/github.com/Shopify/sarama/LICENSE | 20 - vendor/github.com/Shopify/sarama/Makefile | 29 - vendor/github.com/Shopify/sarama/README.md | 39 - vendor/github.com/Shopify/sarama/Vagrantfile | 20 - .../Shopify/sarama/api_versions_request.go | 24 - .../Shopify/sarama/api_versions_response.go | 87 - .../Shopify/sarama/async_producer.go | 921 ------- vendor/github.com/Shopify/sarama/broker.go | 692 ----- vendor/github.com/Shopify/sarama/client.go | 794 ------ vendor/github.com/Shopify/sarama/config.go | 442 ---- vendor/github.com/Shopify/sarama/consumer.go | 806 ------ .../Shopify/sarama/consumer_group_members.go | 94 - .../sarama/consumer_metadata_request.go | 26 - .../sarama/consumer_metadata_response.go | 85 - .../github.com/Shopify/sarama/crc32_field.go | 69 - .../sarama/create_partitions_request.go | 121 - .../sarama/create_partitions_response.go | 94 - .../Shopify/sarama/describe_groups_request.go | 30 - .../sarama/describe_groups_response.go | 187 -- vendor/github.com/Shopify/sarama/dev.yml | 10 - .../Shopify/sarama/encoder_decoder.go | 89 - vendor/github.com/Shopify/sarama/errors.go | 269 -- .../Shopify/sarama/fetch_request.go | 170 -- .../Shopify/sarama/fetch_response.go | 315 --- .../Shopify/sarama/heartbeat_request.go | 47 - .../Shopify/sarama/heartbeat_response.go | 32 - .../Shopify/sarama/join_group_request.go | 143 -- .../Shopify/sarama/join_group_response.go | 115 - .../Shopify/sarama/leave_group_request.go | 40 - .../Shopify/sarama/leave_group_response.go | 32 - .../github.com/Shopify/sarama/length_field.go | 69 - .../Shopify/sarama/list_groups_request.go | 24 - 
.../Shopify/sarama/list_groups_response.go | 69 - vendor/github.com/Shopify/sarama/message.go | 200 -- .../github.com/Shopify/sarama/message_set.go | 89 - .../Shopify/sarama/metadata_request.go | 52 - .../Shopify/sarama/metadata_response.go | 239 -- vendor/github.com/Shopify/sarama/metrics.go | 51 - .../github.com/Shopify/sarama/mockbroker.go | 324 --- .../Shopify/sarama/mockresponses.go | 469 ---- .../github.com/Shopify/sarama/mocks/README.md | 13 - .../Shopify/sarama/mocks/async_producer.go | 174 -- .../Shopify/sarama/mocks/consumer.go | 315 --- .../github.com/Shopify/sarama/mocks/mocks.go | 48 - .../Shopify/sarama/mocks/sync_producer.go | 146 -- .../Shopify/sarama/offset_commit_request.go | 190 -- .../Shopify/sarama/offset_commit_response.go | 85 - .../Shopify/sarama/offset_fetch_request.go | 81 - .../Shopify/sarama/offset_fetch_response.go | 143 -- .../Shopify/sarama/offset_manager.go | 560 ----- .../Shopify/sarama/offset_request.go | 132 - .../Shopify/sarama/offset_response.go | 174 -- .../Shopify/sarama/packet_decoder.go | 60 - .../Shopify/sarama/packet_encoder.go | 65 - .../github.com/Shopify/sarama/partitioner.go | 135 - .../github.com/Shopify/sarama/prep_encoder.go | 153 -- .../Shopify/sarama/produce_request.go | 252 -- .../Shopify/sarama/produce_response.go | 183 -- .../github.com/Shopify/sarama/produce_set.go | 224 -- .../github.com/Shopify/sarama/real_decoder.go | 324 --- .../github.com/Shopify/sarama/real_encoder.go | 156 -- vendor/github.com/Shopify/sarama/record.go | 113 - .../github.com/Shopify/sarama/record_batch.go | 265 -- vendor/github.com/Shopify/sarama/records.go | 167 -- vendor/github.com/Shopify/sarama/request.go | 121 - .../Shopify/sarama/response_header.go | 21 - vendor/github.com/Shopify/sarama/sarama.go | 99 - .../Shopify/sarama/sasl_handshake_request.go | 33 - .../Shopify/sarama/sasl_handshake_response.go | 38 - .../Shopify/sarama/sync_group_request.go | 100 - .../Shopify/sarama/sync_group_response.go | 41 - .../Shopify/sarama/sync_producer.go | 164 -- vendor/github.com/Shopify/sarama/timestamp.go | 40 - vendor/github.com/Shopify/sarama/utils.go | 184 -- .../github.com/bsm/sarama-cluster/.gitignore | 4 - .../github.com/bsm/sarama-cluster/.travis.yml | 19 - .../github.com/bsm/sarama-cluster/Gopkg.lock | 99 - .../github.com/bsm/sarama-cluster/Gopkg.toml | 26 - vendor/github.com/bsm/sarama-cluster/LICENSE | 22 - vendor/github.com/bsm/sarama-cluster/Makefile | 35 - .../github.com/bsm/sarama-cluster/README.md | 151 -- .../bsm/sarama-cluster/README.md.tpl | 67 - .../github.com/bsm/sarama-cluster/balancer.go | 174 -- .../github.com/bsm/sarama-cluster/client.go | 50 - .../github.com/bsm/sarama-cluster/cluster.go | 25 - .../github.com/bsm/sarama-cluster/config.go | 146 -- .../github.com/bsm/sarama-cluster/consumer.go | 875 ------- vendor/github.com/bsm/sarama-cluster/doc.go | 8 - .../github.com/bsm/sarama-cluster/offsets.go | 49 - .../bsm/sarama-cluster/partitions.go | 277 --- vendor/github.com/bsm/sarama-cluster/util.go | 75 - vendor/github.com/davecgh/go-spew/LICENSE | 15 - .../github.com/davecgh/go-spew/spew/bypass.go | 152 -- .../davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 --- .../github.com/davecgh/go-spew/spew/config.go | 306 --- vendor/github.com/davecgh/go-spew/spew/doc.go | 211 -- .../github.com/davecgh/go-spew/spew/dump.go | 509 ---- .../github.com/davecgh/go-spew/spew/format.go | 419 ---- .../github.com/davecgh/go-spew/spew/spew.go | 148 -- .../github.com/eapache/go-resiliency/LICENSE | 22 - 
.../eapache/go-resiliency/breaker/README.md | 33 - .../eapache/go-resiliency/breaker/breaker.go | 161 -- .../eapache/go-xerial-snappy/.gitignore | 24 - .../eapache/go-xerial-snappy/.travis.yml | 7 - .../eapache/go-xerial-snappy/LICENSE | 21 - .../eapache/go-xerial-snappy/README.md | 13 - .../eapache/go-xerial-snappy/snappy.go | 43 - vendor/github.com/eapache/queue/.gitignore | 23 - vendor/github.com/eapache/queue/.travis.yml | 7 - vendor/github.com/eapache/queue/LICENSE | 21 - vendor/github.com/eapache/queue/README.md | 16 - vendor/github.com/eapache/queue/queue.go | 102 - vendor/github.com/go-redis/redis/.gitignore | 2 - vendor/github.com/go-redis/redis/.travis.yml | 19 - vendor/github.com/go-redis/redis/LICENSE | 25 - vendor/github.com/go-redis/redis/Makefile | 19 - vendor/github.com/go-redis/redis/README.md | 143 -- vendor/github.com/go-redis/redis/cluster.go | 1284 ---------- .../go-redis/redis/cluster_commands.go | 22 - vendor/github.com/go-redis/redis/command.go | 1024 -------- vendor/github.com/go-redis/redis/commands.go | 2147 ---------------- vendor/github.com/go-redis/redis/doc.go | 4 - .../internal/consistenthash/consistenthash.go | 81 - .../go-redis/redis/internal/error.go | 84 - .../redis/internal/hashtag/hashtag.go | 77 - .../go-redis/redis/internal/internal.go | 24 - .../github.com/go-redis/redis/internal/log.go | 15 - .../go-redis/redis/internal/once.go | 60 - .../go-redis/redis/internal/pool/conn.go | 78 - .../go-redis/redis/internal/pool/pool.go | 377 --- .../redis/internal/pool/pool_single.go | 55 - .../redis/internal/pool/pool_sticky.go | 123 - .../go-redis/redis/internal/proto/reader.go | 334 --- .../go-redis/redis/internal/proto/scan.go | 133 - .../redis/internal/proto/write_buffer.go | 103 - .../go-redis/redis/internal/safe.go | 7 - .../go-redis/redis/internal/unsafe.go | 12 - .../go-redis/redis/internal/util.go | 62 - vendor/github.com/go-redis/redis/iterator.go | 73 - vendor/github.com/go-redis/redis/options.go | 200 -- vendor/github.com/go-redis/redis/parser.go | 388 --- vendor/github.com/go-redis/redis/pipeline.go | 112 - vendor/github.com/go-redis/redis/pubsub.go | 401 --- vendor/github.com/go-redis/redis/redis.go | 450 ---- .../go-redis/redis/redis_context.go | 35 - .../go-redis/redis/redis_no_context.go | 15 - vendor/github.com/go-redis/redis/result.go | 140 -- vendor/github.com/go-redis/redis/ring.go | 510 ---- vendor/github.com/go-redis/redis/script.go | 62 - vendor/github.com/go-redis/redis/sentinel.go | 337 --- vendor/github.com/go-redis/redis/tx.go | 103 - vendor/github.com/go-redis/redis/universal.go | 138 - vendor/github.com/gocql/gocql/.gitignore | 5 - vendor/github.com/gocql/gocql/.travis.yml | 45 - vendor/github.com/gocql/gocql/AUTHORS | 103 - vendor/github.com/gocql/gocql/CONTRIBUTING.md | 78 - vendor/github.com/gocql/gocql/LICENSE | 27 - vendor/github.com/gocql/gocql/README.md | 214 -- .../gocql/gocql/address_translators.go | 26 - vendor/github.com/gocql/gocql/cluster.go | 187 -- vendor/github.com/gocql/gocql/compressor.go | 28 - vendor/github.com/gocql/gocql/conn.go | 1182 --------- .../github.com/gocql/gocql/connectionpool.go | 571 ----- vendor/github.com/gocql/gocql/control.go | 480 ---- vendor/github.com/gocql/gocql/debug_off.go | 5 - vendor/github.com/gocql/gocql/debug_on.go | 5 - vendor/github.com/gocql/gocql/doc.go | 9 - vendor/github.com/gocql/gocql/errors.go | 116 - vendor/github.com/gocql/gocql/events.go | 295 --- vendor/github.com/gocql/gocql/filters.go | 57 - vendor/github.com/gocql/gocql/frame.go | 1943 --------------- 
vendor/github.com/gocql/gocql/fuzz.go | 33 - vendor/github.com/gocql/gocql/helpers.go | 365 --- vendor/github.com/gocql/gocql/host_source.go | 692 ----- .../github.com/gocql/gocql/host_source_gen.go | 45 - vendor/github.com/gocql/gocql/integration.sh | 87 - .../gocql/gocql/internal/lru/lru.go | 127 - .../gocql/gocql/internal/murmur/murmur.go | 135 - .../gocql/internal/murmur/murmur_appengine.go | 11 - .../gocql/internal/murmur/murmur_unsafe.go | 15 - .../gocql/gocql/internal/streams/streams.go | 140 -- vendor/github.com/gocql/gocql/logger.go | 30 - vendor/github.com/gocql/gocql/marshal.go | 2216 ----------------- vendor/github.com/gocql/gocql/metadata.go | 1092 -------- vendor/github.com/gocql/gocql/policies.go | 708 ------ .../github.com/gocql/gocql/prepared_cache.go | 64 - .../github.com/gocql/gocql/query_executor.go | 74 - vendor/github.com/gocql/gocql/ring.go | 152 -- vendor/github.com/gocql/gocql/session.go | 1730 ------------- vendor/github.com/gocql/gocql/token.go | 220 -- vendor/github.com/gocql/gocql/topology.go | 212 -- vendor/github.com/gocql/gocql/uuid.go | 272 -- vendor/github.com/golang/snappy/.gitignore | 16 - vendor/github.com/golang/snappy/AUTHORS | 15 - vendor/github.com/golang/snappy/CONTRIBUTORS | 37 - vendor/github.com/golang/snappy/LICENSE | 27 - vendor/github.com/golang/snappy/README | 107 - vendor/github.com/golang/snappy/decode.go | 237 -- .../github.com/golang/snappy/decode_amd64.go | 14 - .../github.com/golang/snappy/decode_amd64.s | 490 ---- .../github.com/golang/snappy/decode_other.go | 101 - vendor/github.com/golang/snappy/encode.go | 285 --- .../github.com/golang/snappy/encode_amd64.go | 29 - .../github.com/golang/snappy/encode_amd64.s | 730 ------ .../github.com/golang/snappy/encode_other.go | 238 -- vendor/github.com/golang/snappy/snappy.go | 87 - .../hailocab/go-hostpool/.gitignore | 22 - .../hailocab/go-hostpool/.travis.yml | 0 .../github.com/hailocab/go-hostpool/LICENSE | 21 - .../github.com/hailocab/go-hostpool/README.md | 17 - .../hailocab/go-hostpool/epsilon_greedy.go | 220 -- .../go-hostpool/epsilon_value_calculators.go | 40 - .../hailocab/go-hostpool/host_entry.go | 62 - .../hailocab/go-hostpool/hostpool.go | 243 -- vendor/github.com/hashicorp/consul/LICENSE | 354 --- .../github.com/hashicorp/consul/api/README.md | 43 - vendor/github.com/hashicorp/consul/api/acl.go | 193 -- .../github.com/hashicorp/consul/api/agent.go | 627 ----- vendor/github.com/hashicorp/consul/api/api.go | 791 ------ .../hashicorp/consul/api/catalog.go | 200 -- .../hashicorp/consul/api/coordinate.go | 106 - .../github.com/hashicorp/consul/api/event.go | 104 - .../github.com/hashicorp/consul/api/health.go | 215 -- vendor/github.com/hashicorp/consul/api/kv.go | 420 ---- .../github.com/hashicorp/consul/api/lock.go | 385 --- .../hashicorp/consul/api/operator.go | 11 - .../hashicorp/consul/api/operator_area.go | 193 -- .../consul/api/operator_autopilot.go | 219 -- .../hashicorp/consul/api/operator_keyring.go | 86 - .../hashicorp/consul/api/operator_raft.go | 89 - .../hashicorp/consul/api/operator_segment.go | 11 - .../hashicorp/consul/api/prepared_query.go | 204 -- vendor/github.com/hashicorp/consul/api/raw.go | 24 - .../hashicorp/consul/api/semaphore.go | 513 ---- .../hashicorp/consul/api/session.go | 224 -- .../hashicorp/consul/api/snapshot.go | 47 - .../github.com/hashicorp/consul/api/status.go | 43 - .../hashicorp/consul/website/LICENSE.md | 10 - .../hashicorp/go-rootcerts/.travis.yml | 12 - .../github.com/hashicorp/go-rootcerts/LICENSE | 363 --- 
.../hashicorp/go-rootcerts/Makefile | 8 - .../hashicorp/go-rootcerts/README.md | 43 - .../github.com/hashicorp/go-rootcerts/doc.go | 9 - .../hashicorp/go-rootcerts/rootcerts.go | 103 - .../hashicorp/go-rootcerts/rootcerts_base.go | 12 - .../go-rootcerts/rootcerts_darwin.go | 48 - .../capath-with-symlinks/securetrust.pem | 1 - .../capath-with-symlinks/thawte.pem | 1 - vendor/github.com/hashicorp/serf/LICENSE | 354 --- .../hashicorp/serf/coordinate/client.go | 180 -- .../hashicorp/serf/coordinate/config.go | 70 - .../hashicorp/serf/coordinate/coordinate.go | 183 -- .../hashicorp/serf/coordinate/phantom.go | 187 -- .../hashicorp/serf/ops-misc/debian/copyright | 2 - .../hashicorp/serf/website/source/LICENSE | 10 - vendor/github.com/howeyc/crc16/.travis.yml | 1 - vendor/github.com/howeyc/crc16/LICENSE | 27 - vendor/github.com/howeyc/crc16/README.md | 34 - vendor/github.com/howeyc/crc16/crc16.go | 161 -- vendor/github.com/howeyc/crc16/hash.go | 116 - vendor/github.com/kr/pretty/.gitignore | 4 - vendor/github.com/kr/pretty/License | 21 - vendor/github.com/kr/pretty/Readme | 9 - vendor/github.com/kr/pretty/diff.go | 265 -- vendor/github.com/kr/pretty/formatter.go | 328 --- vendor/github.com/kr/pretty/pretty.go | 108 - vendor/github.com/kr/pretty/zero.go | 41 - vendor/github.com/kr/text/License | 19 - vendor/github.com/kr/text/Readme | 3 - vendor/github.com/kr/text/doc.go | 3 - vendor/github.com/kr/text/indent.go | 74 - vendor/github.com/kr/text/wrap.go | 86 - .../github.com/ligato/cn-infra/agent/agent.go | 269 ++ .../ligato/cn-infra/agent/options.go | 133 + .../ligato/cn-infra/agent/plugin_lookup.go | 180 ++ .../ligato/cn-infra/config/plugin_config.go | 140 +- .../github.com/ligato/cn-infra/core/README.md | 19 - .../ligato/cn-infra/core/agent_core.go | 326 --- vendor/github.com/ligato/cn-infra/core/doc.go | 18 - .../ligato/cn-infra/core/event_loop.go | 47 - .../cn-infra/core/list_flavor_plugin.go | 244 -- .../github.com/ligato/cn-infra/core/name.go | 32 - .../ligato/cn-infra/core/options.go | 87 - .../ligato/cn-infra/core/plugin_spi.go | 29 - .../ligato/cn-infra/core/version.go | 25 - .../ligato/cn-infra/datasync/aggregator.go | 6 +- .../kvdbsync/local/local_bytes_txn.go | 37 +- .../kvdbsync/local/local_proto_txn.go | 53 +- .../cn-infra/datasync/kvdbsync/options.go | 59 + .../datasync/kvdbsync/plugin_impl_dbsync.go | 64 +- .../cn-infra/datasync/msgsync/options.go | 57 + .../datasync/msgsync/plugin_impl_msgsync.go | 33 +- .../ligato/cn-infra/datasync/resync/event.go | 11 +- .../cn-infra/datasync/resync/options.go | 35 + .../datasync/resync/plugin_api_resync.go | 6 +- .../datasync/resync/plugin_impl_resync.go | 6 +- .../cn-infra/db/keyval/bytes_watcher_api.go | 5 +- .../cn-infra/db/keyval/consul/README.md | 27 - .../cn-infra/db/keyval/consul/consul.conf | 6 - .../cn-infra/db/keyval/consul/consul.go | 549 ---- .../cn-infra/db/keyval/consul/plugin.go | 146 -- .../ligato/cn-infra/db/keyval/consul/txn.go | 69 - .../db/keyval/etcd/bytes_broker_impl.go | 2 +- .../ligato/cn-infra/db/keyval/etcd/config.go | 2 + .../ligato/cn-infra/db/keyval/etcd/etcd.conf | 23 +- .../ligato/cn-infra/db/keyval/etcd/options.go | 55 + .../db/keyval/etcd/plugin_impl_etcd.go | 180 +- .../cn-infra/db/keyval/plugin_api_keyval.go | 6 + .../cn-infra/db/keyval/proto_watcher_api.go | 5 +- .../ligato/cn-infra/db/keyval/redis/README.md | 151 -- .../db/keyval/redis/bytes_broker_impl.go | 531 ---- .../db/keyval/redis/bytes_txn_impl.go | 139 -- .../db/keyval/redis/bytes_watcher_impl.go | 198 -- .../ligato/cn-infra/db/keyval/redis/config.go 
| 371 --- .../ligato/cn-infra/db/keyval/redis/doc.go | 18 - .../db/keyval/redis/plugin_impl_redis.go | 119 - .../ligato/cn-infra/db/sql/README.md | 22 - .../cn-infra/db/sql/cassandra/README.md | 69 - .../db/sql/cassandra/cassa_broker_impl.go | 129 - .../db/sql/cassandra/cassa_txn_impl.go | 29 - .../db/sql/cassandra/cassa_watcher_impl.go | 22 - .../cn-infra/db/sql/cassandra/cassandra.conf | 26 - .../cn-infra/db/sql/cassandra/config.go | 197 -- .../ligato/cn-infra/db/sql/cassandra/doc.go | 73 - .../db/sql/cassandra/plugin_impl_cassa.go | 143 -- .../ligato/cn-infra/db/sql/cassandra/query.go | 309 --- .../github.com/ligato/cn-infra/db/sql/doc.go | 19 - .../ligato/cn-infra/db/sql/slice_utils.go | 68 - .../ligato/cn-infra/db/sql/sql_broker_api.go | 128 - .../ligato/cn-infra/db/sql/sql_expression.go | 266 -- .../cn-infra/db/sql/sql_struct_metadata.go | 56 - .../ligato/cn-infra/db/sql/sql_watcher_api.go | 55 - .../connectors/all_connectors_flavor.go | 140 -- .../flavors/connectors/connectors_util.go | 38 - .../ligato/cn-infra/flavors/connectors/doc.go | 3 - .../ligato/cn-infra/flavors/local/doc.go | 2 - .../cn-infra/flavors/local/local_flavor.go | 212 -- .../cn-infra/flavors/local/plugin_deps.go | 52 - .../ligato/cn-infra/flavors/rpc/doc.go | 3 - .../ligato/cn-infra/flavors/rpc/rpc_flavor.go | 137 - .../cn-infra/health/probe/deps_probe.go | 38 - .../ligato/cn-infra/health/probe/doc.go | 16 - .../health/probe/plugin_impl_probes.go | 98 - .../health/probe/plugin_impl_prometheus.go | 176 -- .../cn-infra/health/statuscheck/options.go | 49 + .../statuscheck/plugin_api_statuscheck.go | 17 +- .../statuscheck/plugin_impl_statuscheck.go | 85 +- .../github.com/ligato/cn-infra/idxmap/api.go | 6 +- .../github.com/ligato/cn-infra/idxmap/chan.go | 5 +- .../idxmap/mem/inmemory_name_mapping.go | 10 +- .../github.com/ligato/cn-infra/infra/infra.go | 49 + .../ligato/cn-infra/logging/log_api.go | 113 +- .../ligato/cn-infra/logging/logging.conf | 10 +- .../cn-infra/logging/logmanager/README.md | 25 - .../ligato/cn-infra/logging/logmanager/doc.go | 17 - .../logmanager/plugin_impl_log_manager.go | 223 -- .../ligato/cn-infra/logging/logrus/logger.go | 12 +- .../cn-infra/logging/logrus/registry.go | 4 +- .../ligato/cn-infra/messaging/kafka/README.md | 34 - .../messaging/kafka/client/asyncproducer.go | 267 -- .../cn-infra/messaging/kafka/client/config.go | 308 --- .../messaging/kafka/client/consumer.go | 360 --- .../cn-infra/messaging/kafka/client/doc.go | 17 - .../messaging/kafka/client/messages.go | 276 -- .../cn-infra/messaging/kafka/client/mocks.go | 181 -- .../messaging/kafka/client/syncproducer.go | 217 -- .../ligato/cn-infra/messaging/kafka/doc.go | 221 -- .../cn-infra/messaging/kafka/kafka.conf | 9 - .../cn-infra/messaging/kafka/mux/README.md | 29 - .../messaging/kafka/mux/bytes_connection.go | 323 --- .../cn-infra/messaging/kafka/mux/chan.go | 66 - .../cn-infra/messaging/kafka/mux/config.go | 153 -- .../cn-infra/messaging/kafka/mux/doc.go | 17 - .../cn-infra/messaging/kafka/mux/mock.go | 49 - .../messaging/kafka/mux/multiplexer.go | 372 --- .../messaging/kafka/mux/proto_connection.go | 364 --- .../messaging/kafka/plugin_impl_kafka.go | 237 -- .../ligato/cn-infra/rpc/grpc/config.go | 17 +- .../ligato/cn-infra/rpc/grpc/grpc.conf | 7 +- .../cn-infra/rpc/grpc/listen_and_serve.go | 4 +- .../ligato/cn-infra/rpc/grpc/options.go | 69 + .../cn-infra/rpc/grpc/plugin_impl_grpc.go | 47 +- .../ligato/cn-infra/rpc/prometheus/options.go | 37 + .../rpc/prometheus/plugin_impl_prometheus.go | 17 +- 
.../ligato/cn-infra/rpc/rest/auth.go | 68 + .../ligato/cn-infra/rpc/rest/config.go | 87 +- .../ligato/cn-infra/rpc/rest/http.conf | 18 +- .../ligato/cn-infra/rpc/rest/options.go | 72 + .../cn-infra/rpc/rest/plugin_impl_fork.go | 69 +- .../cn-infra/rpc/rest/plugin_impl_rest.go | 110 +- .../ligato/cn-infra/servicelabel/options.go | 20 + .../servicelabel/plugin_impl_servicelabel.go | 2 + .../ligato/cn-infra/utils/clienttls/doc.go | 16 - .../cn-infra/utils/clienttls/tlsutil.go | 65 - .../once/return_error.go} | 26 +- .../ligato/cn-infra/utils/structs/doc.go | 16 - .../utils/structs/structs_reflection.go | 136 - vendor/github.com/maraino/go-mock/.travis.yml | 9 - vendor/github.com/maraino/go-mock/AUTHORS | 4 - vendor/github.com/maraino/go-mock/LICENSE | 22 - vendor/github.com/maraino/go-mock/Makefile | 11 - vendor/github.com/maraino/go-mock/README.md | 201 -- vendor/github.com/maraino/go-mock/mock.go | 683 ----- .../github.com/mitchellh/go-homedir/LICENSE | 21 - .../github.com/mitchellh/go-homedir/README.md | 14 - .../mitchellh/go-homedir/homedir.go | 137 - vendor/github.com/pierrec/lz4/.gitignore | 31 - vendor/github.com/pierrec/lz4/.travis.yml | 8 - vendor/github.com/pierrec/lz4/LICENSE | 28 - vendor/github.com/pierrec/lz4/README.md | 31 - vendor/github.com/pierrec/lz4/block.go | 454 ---- vendor/github.com/pierrec/lz4/lz4.go | 105 - vendor/github.com/pierrec/lz4/reader.go | 364 --- vendor/github.com/pierrec/lz4/writer.go | 377 --- vendor/github.com/pierrec/xxHash/LICENSE | 28 - .../pierrec/xxHash/xxHash32/xxHash32.go | 205 -- .../github.com/rcrowley/go-metrics/.gitignore | 9 - .../rcrowley/go-metrics/.travis.yml | 18 - vendor/github.com/rcrowley/go-metrics/LICENSE | 29 - .../github.com/rcrowley/go-metrics/README.md | 166 -- .../github.com/rcrowley/go-metrics/counter.go | 112 - .../github.com/rcrowley/go-metrics/debug.go | 76 - vendor/github.com/rcrowley/go-metrics/ewma.go | 118 - .../github.com/rcrowley/go-metrics/gauge.go | 120 - .../rcrowley/go-metrics/gauge_float64.go | 127 - .../rcrowley/go-metrics/graphite.go | 113 - .../rcrowley/go-metrics/healthcheck.go | 61 - .../rcrowley/go-metrics/histogram.go | 202 -- vendor/github.com/rcrowley/go-metrics/json.go | 31 - vendor/github.com/rcrowley/go-metrics/log.go | 80 - .../github.com/rcrowley/go-metrics/memory.md | 285 --- .../github.com/rcrowley/go-metrics/meter.go | 264 -- .../github.com/rcrowley/go-metrics/metrics.go | 13 - .../rcrowley/go-metrics/opentsdb.go | 119 - .../rcrowley/go-metrics/registry.go | 354 --- .../github.com/rcrowley/go-metrics/runtime.go | 212 -- .../rcrowley/go-metrics/runtime_cgo.go | 10 - .../go-metrics/runtime_gccpufraction.go | 9 - .../rcrowley/go-metrics/runtime_no_cgo.go | 7 - .../go-metrics/runtime_no_gccpufraction.go | 9 - .../github.com/rcrowley/go-metrics/sample.go | 616 ----- .../github.com/rcrowley/go-metrics/syslog.go | 78 - .../github.com/rcrowley/go-metrics/timer.go | 329 --- .../rcrowley/go-metrics/validate.sh | 10 - .../github.com/rcrowley/go-metrics/writer.go | 100 - .../github.com/willfaught/gockle/.travis.yml | 12 - vendor/github.com/willfaught/gockle/batch.go | 108 - vendor/github.com/willfaught/gockle/doc.go | 15 - .../github.com/willfaught/gockle/iterator.go | 61 - .../github.com/willfaught/gockle/license.md | 21 - vendor/github.com/willfaught/gockle/readme.md | 8 - .../github.com/willfaught/gockle/session.go | 228 -- vendor/gopkg.in/inf.v0/LICENSE | 28 - vendor/gopkg.in/inf.v0/dec.go | 615 ----- vendor/gopkg.in/inf.v0/rounder.go | 145 -- 496 files changed, 2187 insertions(+), 69997 deletions(-) 
create mode 100644 cmd/vpp-agent/vpp_agent.go
create mode 100644 plugins/govppmux/options.go
create mode 100644 plugins/linux/options.go
create mode 100644 plugins/vpp/options.go
delete mode 100644 vendor/github.com/Shopify/sarama/.gitignore
delete mode 100644 vendor/github.com/Shopify/sarama/.travis.yml
delete mode 100644 vendor/github.com/Shopify/sarama/CHANGELOG.md
delete mode 100644 vendor/github.com/Shopify/sarama/LICENSE
delete mode 100644 vendor/github.com/Shopify/sarama/Makefile
delete mode 100644 vendor/github.com/Shopify/sarama/README.md
delete mode 100644 vendor/github.com/Shopify/sarama/Vagrantfile
delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/async_producer.go
delete mode 100644 vendor/github.com/Shopify/sarama/broker.go
delete mode 100644 vendor/github.com/Shopify/sarama/client.go
delete mode 100644 vendor/github.com/Shopify/sarama/config.go
delete mode 100644 vendor/github.com/Shopify/sarama/consumer.go
delete mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members.go
delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/crc32_field.go
delete mode 100644 vendor/github.com/Shopify/sarama/create_partitions_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/create_partitions_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/dev.yml
delete mode 100644 vendor/github.com/Shopify/sarama/encoder_decoder.go
delete mode 100644 vendor/github.com/Shopify/sarama/errors.go
delete mode 100644 vendor/github.com/Shopify/sarama/fetch_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/fetch_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/join_group_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/join_group_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/length_field.go
delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/message.go
delete mode 100644 vendor/github.com/Shopify/sarama/message_set.go
delete mode 100644 vendor/github.com/Shopify/sarama/metadata_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/metadata_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/metrics.go
delete mode 100644 vendor/github.com/Shopify/sarama/mockbroker.go
delete mode 100644 vendor/github.com/Shopify/sarama/mockresponses.go
delete mode 100644 vendor/github.com/Shopify/sarama/mocks/README.md
delete mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer.go
delete mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer.go
delete mode 100644 vendor/github.com/Shopify/sarama/mocks/mocks.go
delete mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer.go
delete mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/offset_manager.go
delete mode 100644 vendor/github.com/Shopify/sarama/offset_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/offset_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/packet_decoder.go
delete mode 100644 vendor/github.com/Shopify/sarama/packet_encoder.go
delete mode 100644 vendor/github.com/Shopify/sarama/partitioner.go
delete mode 100644 vendor/github.com/Shopify/sarama/prep_encoder.go
delete mode 100644 vendor/github.com/Shopify/sarama/produce_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/produce_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/produce_set.go
delete mode 100644 vendor/github.com/Shopify/sarama/real_decoder.go
delete mode 100644 vendor/github.com/Shopify/sarama/real_encoder.go
delete mode 100644 vendor/github.com/Shopify/sarama/record.go
delete mode 100644 vendor/github.com/Shopify/sarama/record_batch.go
delete mode 100644 vendor/github.com/Shopify/sarama/records.go
delete mode 100644 vendor/github.com/Shopify/sarama/request.go
delete mode 100644 vendor/github.com/Shopify/sarama/response_header.go
delete mode 100644 vendor/github.com/Shopify/sarama/sarama.go
delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_request.go
delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_response.go
delete mode 100644 vendor/github.com/Shopify/sarama/sync_producer.go
delete mode 100644 vendor/github.com/Shopify/sarama/timestamp.go
delete mode 100644 vendor/github.com/Shopify/sarama/utils.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/.gitignore
delete mode 100644 vendor/github.com/bsm/sarama-cluster/.travis.yml
delete mode 100644 vendor/github.com/bsm/sarama-cluster/Gopkg.lock
delete mode 100644 vendor/github.com/bsm/sarama-cluster/Gopkg.toml
delete mode 100644 vendor/github.com/bsm/sarama-cluster/LICENSE
delete mode 100644 vendor/github.com/bsm/sarama-cluster/Makefile
delete mode 100644 vendor/github.com/bsm/sarama-cluster/README.md
delete mode 100644 vendor/github.com/bsm/sarama-cluster/README.md.tpl
delete mode 100644 vendor/github.com/bsm/sarama-cluster/balancer.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/client.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/cluster.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/config.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/consumer.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/doc.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/offsets.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/partitions.go
delete mode 100644 vendor/github.com/bsm/sarama-cluster/util.go
delete mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
delete mode 100644 vendor/github.com/eapache/go-resiliency/LICENSE
delete mode 100644 vendor/github.com/eapache/go-resiliency/breaker/README.md
delete mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go
delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/.gitignore
delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/.travis.yml
delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/LICENSE
delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/README.md
delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go
delete mode 100644 vendor/github.com/eapache/queue/.gitignore
delete mode 100644 vendor/github.com/eapache/queue/.travis.yml
delete mode 100644 vendor/github.com/eapache/queue/LICENSE
delete mode 100644 vendor/github.com/eapache/queue/README.md
delete mode 100644 vendor/github.com/eapache/queue/queue.go
delete mode 100644 vendor/github.com/go-redis/redis/.gitignore
delete mode 100644 vendor/github.com/go-redis/redis/.travis.yml
delete mode 100644 vendor/github.com/go-redis/redis/LICENSE
delete mode 100644 vendor/github.com/go-redis/redis/Makefile
delete mode 100644 vendor/github.com/go-redis/redis/README.md
delete mode 100644 vendor/github.com/go-redis/redis/cluster.go
delete mode 100644 vendor/github.com/go-redis/redis/cluster_commands.go
delete mode 100644 vendor/github.com/go-redis/redis/command.go
delete mode 100644 vendor/github.com/go-redis/redis/commands.go
delete mode 100644 vendor/github.com/go-redis/redis/doc.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/error.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/internal.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/log.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/once.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/pool/conn.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/pool/pool.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/pool/pool_single.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/proto/reader.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/proto/scan.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/proto/write_buffer.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/safe.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/unsafe.go
delete mode 100644 vendor/github.com/go-redis/redis/internal/util.go
delete mode 100644 vendor/github.com/go-redis/redis/iterator.go
delete mode 100644 vendor/github.com/go-redis/redis/options.go
delete mode 100644 vendor/github.com/go-redis/redis/parser.go
delete mode 100644 vendor/github.com/go-redis/redis/pipeline.go
delete mode 100644 vendor/github.com/go-redis/redis/pubsub.go
delete mode 100644 vendor/github.com/go-redis/redis/redis.go
delete mode 100644 vendor/github.com/go-redis/redis/redis_context.go
delete mode 100644 vendor/github.com/go-redis/redis/redis_no_context.go
delete mode 100644 vendor/github.com/go-redis/redis/result.go
delete mode 100644 vendor/github.com/go-redis/redis/ring.go
delete mode 100644 vendor/github.com/go-redis/redis/script.go
delete mode 100644 vendor/github.com/go-redis/redis/sentinel.go
delete mode 100644 vendor/github.com/go-redis/redis/tx.go
delete mode 100644 vendor/github.com/go-redis/redis/universal.go
delete mode 100644 vendor/github.com/gocql/gocql/.gitignore
delete mode 100644 vendor/github.com/gocql/gocql/.travis.yml
delete mode 100644 vendor/github.com/gocql/gocql/AUTHORS
delete mode 100644 vendor/github.com/gocql/gocql/CONTRIBUTING.md
delete mode 100644 vendor/github.com/gocql/gocql/LICENSE
delete mode 100644 vendor/github.com/gocql/gocql/README.md
delete mode 100644 vendor/github.com/gocql/gocql/address_translators.go
delete mode 100644 vendor/github.com/gocql/gocql/cluster.go
delete mode 100644 vendor/github.com/gocql/gocql/compressor.go
delete mode 100644 vendor/github.com/gocql/gocql/conn.go
delete mode 100644 vendor/github.com/gocql/gocql/connectionpool.go
delete mode 100644 vendor/github.com/gocql/gocql/control.go
delete mode 100644 vendor/github.com/gocql/gocql/debug_off.go
delete mode 100644 vendor/github.com/gocql/gocql/debug_on.go
delete mode 100644 vendor/github.com/gocql/gocql/doc.go
delete mode 100644 vendor/github.com/gocql/gocql/errors.go
delete mode 100644 vendor/github.com/gocql/gocql/events.go
delete mode 100644 vendor/github.com/gocql/gocql/filters.go
delete mode 100644 vendor/github.com/gocql/gocql/frame.go
delete mode 100644 vendor/github.com/gocql/gocql/fuzz.go
delete mode 100644 vendor/github.com/gocql/gocql/helpers.go
delete mode 100644 vendor/github.com/gocql/gocql/host_source.go
delete mode 100644 vendor/github.com/gocql/gocql/host_source_gen.go
delete mode 100755 vendor/github.com/gocql/gocql/integration.sh
delete mode 100644 vendor/github.com/gocql/gocql/internal/lru/lru.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/streams/streams.go
delete mode 100644 vendor/github.com/gocql/gocql/logger.go
delete mode 100644 vendor/github.com/gocql/gocql/marshal.go
delete mode 100644 vendor/github.com/gocql/gocql/metadata.go
delete mode 100644 vendor/github.com/gocql/gocql/policies.go
delete mode 100644 vendor/github.com/gocql/gocql/prepared_cache.go
delete mode 100644 vendor/github.com/gocql/gocql/query_executor.go
delete mode 100644 vendor/github.com/gocql/gocql/ring.go
delete mode 100644 vendor/github.com/gocql/gocql/session.go
delete mode 100644 vendor/github.com/gocql/gocql/token.go
delete mode 100644 vendor/github.com/gocql/gocql/topology.go
delete mode 100644 vendor/github.com/gocql/gocql/uuid.go
delete mode 100644 vendor/github.com/golang/snappy/.gitignore
delete mode 100644 vendor/github.com/golang/snappy/AUTHORS
delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS
delete mode 100644 vendor/github.com/golang/snappy/LICENSE
delete mode 100644 vendor/github.com/golang/snappy/README
delete mode 100644 vendor/github.com/golang/snappy/decode.go
delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.go
delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s
delete mode 100644 vendor/github.com/golang/snappy/decode_other.go
delete mode 100644 vendor/github.com/golang/snappy/encode.go
delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.go
delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s
delete mode 100644 vendor/github.com/golang/snappy/encode_other.go
delete mode 100644 vendor/github.com/golang/snappy/snappy.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/.gitignore
delete mode 100644 vendor/github.com/hailocab/go-hostpool/.travis.yml
delete mode 100644 vendor/github.com/hailocab/go-hostpool/LICENSE
delete mode 100644 vendor/github.com/hailocab/go-hostpool/README.md
delete mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/host_entry.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/hostpool.go
delete mode 100644 vendor/github.com/hashicorp/consul/LICENSE
delete mode 100644 vendor/github.com/hashicorp/consul/api/README.md
delete mode 100644 vendor/github.com/hashicorp/consul/api/acl.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/agent.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/api.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/event.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/health.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/kv.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/lock.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/operator.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_segment.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/raw.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/session.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go
delete mode 100644 vendor/github.com/hashicorp/consul/api/status.go
delete mode 100644 vendor/github.com/hashicorp/consul/website/LICENSE.md
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
delete mode 120000 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
delete mode 120000 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
delete mode 100644 vendor/github.com/hashicorp/serf/LICENSE
delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go
delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go
delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go
delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go
delete mode 100644 vendor/github.com/hashicorp/serf/ops-misc/debian/copyright
delete mode 100644 vendor/github.com/hashicorp/serf/website/source/LICENSE
delete mode 100644 vendor/github.com/howeyc/crc16/.travis.yml
delete mode 100644 vendor/github.com/howeyc/crc16/LICENSE
delete mode 100644 vendor/github.com/howeyc/crc16/README.md
delete mode 100644 vendor/github.com/howeyc/crc16/crc16.go
delete mode 100644 vendor/github.com/howeyc/crc16/hash.go
delete mode 100644 vendor/github.com/kr/pretty/.gitignore
delete mode 100644 vendor/github.com/kr/pretty/License
delete mode 100644 vendor/github.com/kr/pretty/Readme
delete mode 100644 vendor/github.com/kr/pretty/diff.go
delete mode 100644 vendor/github.com/kr/pretty/formatter.go
delete mode 100644 vendor/github.com/kr/pretty/pretty.go
delete mode 100644 vendor/github.com/kr/pretty/zero.go
delete mode 100644 vendor/github.com/kr/text/License
delete mode 100644 vendor/github.com/kr/text/Readme
delete mode 100644 vendor/github.com/kr/text/doc.go
delete mode 100644 vendor/github.com/kr/text/indent.go
delete mode 100644 vendor/github.com/kr/text/wrap.go
create mode 100644 vendor/github.com/ligato/cn-infra/agent/agent.go
create mode 100644 vendor/github.com/ligato/cn-infra/agent/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/agent/plugin_lookup.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/README.md
delete mode 100644 vendor/github.com/ligato/cn-infra/core/agent_core.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/event_loop.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/list_flavor_plugin.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/name.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/options.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/plugin_spi.go
delete mode 100644 vendor/github.com/ligato/cn-infra/core/version.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/kvdbsync/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/msgsync/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/resync/options.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/README.md
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.conf
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/plugin.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/txn.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/etcd/options.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/redis/README.md
delete mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_broker_impl.go
delete mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_txn_impl.go
delete mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_watcher_impl.go
delete mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/config.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/redis/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/redis/plugin_impl_redis.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/README.md
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/README.md
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_broker_impl.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_txn_impl.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_watcher_impl.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassandra.conf
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/config.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/plugin_impl_cassa.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/query.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/slice_utils.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_broker_api.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_expression.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_struct_metadata.go
delete mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_watcher_api.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/connectors/all_connectors_flavor.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/connectors/connectors_util.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/connectors/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/local/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/local/local_flavor.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/local/plugin_deps.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/rpc/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/flavors/rpc/rpc_flavor.go
delete mode 100644 vendor/github.com/ligato/cn-infra/health/probe/deps_probe.go
delete mode 100644 vendor/github.com/ligato/cn-infra/health/probe/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_probes.go
delete mode 100644 vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_prometheus.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/statuscheck/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/infra/infra.go
delete mode 100644 vendor/github.com/ligato/cn-infra/logging/logmanager/README.md
delete mode 100644 vendor/github.com/ligato/cn-infra/logging/logmanager/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/logging/logmanager/plugin_impl_log_manager.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/README.md
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/asyncproducer.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/config.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/consumer.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/messages.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/mocks.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/syncproducer.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/kafka.conf
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/README.md
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/bytes_connection.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/chan.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/config.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/mock.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/multiplexer.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/proto_connection.go
delete mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/plugin_impl_kafka.go
create mode 100644 vendor/github.com/ligato/cn-infra/rpc/grpc/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/rpc/prometheus/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/rpc/rest/auth.go
create mode 100644 vendor/github.com/ligato/cn-infra/rpc/rest/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/servicelabel/options.go
delete mode 100644 vendor/github.com/ligato/cn-infra/utils/clienttls/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/utils/clienttls/tlsutil.go
rename vendor/github.com/ligato/cn-infra/{db/sql/plugin_api_sql.go => utils/once/return_error.go} (55%)
delete mode 100644 vendor/github.com/ligato/cn-infra/utils/structs/doc.go
delete mode 100644 vendor/github.com/ligato/cn-infra/utils/structs/structs_reflection.go
delete mode 100644 vendor/github.com/maraino/go-mock/.travis.yml
delete mode 100644 vendor/github.com/maraino/go-mock/AUTHORS
delete mode 100644 vendor/github.com/maraino/go-mock/LICENSE
delete mode 100644 vendor/github.com/maraino/go-mock/Makefile
delete mode 100644 vendor/github.com/maraino/go-mock/README.md
delete mode 100644 vendor/github.com/maraino/go-mock/mock.go
delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE
delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md
delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go
delete mode 100644 vendor/github.com/pierrec/lz4/.gitignore
delete mode 100644 vendor/github.com/pierrec/lz4/.travis.yml
delete mode 100644 vendor/github.com/pierrec/lz4/LICENSE
delete mode 100644 vendor/github.com/pierrec/lz4/README.md
delete mode 100644 vendor/github.com/pierrec/lz4/block.go
delete mode 100644 vendor/github.com/pierrec/lz4/lz4.go
delete mode 100644 vendor/github.com/pierrec/lz4/reader.go
delete mode 100644 vendor/github.com/pierrec/lz4/writer.go
delete mode 100644 vendor/github.com/pierrec/xxHash/LICENSE
delete mode 100644 vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/.gitignore
delete mode 100644 vendor/github.com/rcrowley/go-metrics/.travis.yml
delete mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE
delete mode 100644 vendor/github.com/rcrowley/go-metrics/README.md
delete mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/json.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/log.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md
delete mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go
delete mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go
delete mode 100755 vendor/github.com/rcrowley/go-metrics/validate.sh
delete mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go
delete mode 100644 vendor/github.com/willfaught/gockle/.travis.yml
delete mode 100644 vendor/github.com/willfaught/gockle/batch.go
delete mode 100644 vendor/github.com/willfaught/gockle/doc.go
delete mode 100644 vendor/github.com/willfaught/gockle/iterator.go
delete mode 100644 vendor/github.com/willfaught/gockle/license.md
delete mode 100644 vendor/github.com/willfaught/gockle/readme.md
delete mode 100644 vendor/github.com/willfaught/gockle/session.go
delete mode 100644 vendor/gopkg.in/inf.v0/LICENSE
delete mode 100644 vendor/gopkg.in/inf.v0/dec.go
delete mode 100644 vendor/gopkg.in/inf.v0/rounder.go

diff --git a/Gopkg.lock b/Gopkg.lock
index cedeba4a73..007dc509d9 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -37,15 +37,6 @@
   packages = ["."]
   revision = "cd527374f1e5bff4938207604a14f2e38a9cf512"
 
-[[projects]]
-  name = "github.com/Shopify/sarama"
-  packages = [
-    ".",
-    "mocks"
-  ]
-  revision = "3b1b38866a79f06deddf0487d5c27ba0697ccd65"
-  version = "v1.15.0"
-
 [[projects]]
   branch = "master"
   name = "github.com/bennyscetbun/jsongo"
@@ -58,12 +49,6 @@
   packages = ["quantile"]
   revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
 
-[[projects]]
-  name = "github.com/bsm/sarama-cluster"
-  packages = ["."]
-  revision = "24016d206c730276dfb58f802999066f2f4bfeaa"
-  version = "v2.1.11"
-
 [[projects]]
   branch = "master"
   name = "github.com/buger/goterm"
@@ -90,12 +75,6 @@
   revision = "66722b1ada68fcd5227db853ee92003169a975c8"
   version = "v3.2.0"
 
-[[projects]]
-  name = "github.com/davecgh/go-spew"
-  packages = ["spew"]
-  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
-  version = "v1.1.0"
-
 [[projects]]
   name = "github.com/docker/docker"
   packages = [
@@ -139,24 +118,6 @@
   revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52"
   version = "v0.3.2"
 
-[[projects]]
-  name = "github.com/eapache/go-resiliency"
-  packages = ["breaker"]
-  revision = "6800482f2c813e689c88b7ed3282262385011890"
-  version = "v1.0.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/eapache/go-xerial-snappy"
-  packages = ["."]
-  revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
-
-[[projects]]
-  name = "github.com/eapache/queue"
-  packages = ["."]
-  revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
-  version = "v1.1.0"
-
 [[projects]]
   name = "github.com/elazarl/go-bindata-assetfs"
   packages = ["."]
@@ -180,30 +141,6 @@
   revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
   version = "v1.0.0"
 
-[[projects]]
-  name = "github.com/go-redis/redis"
-  packages = [
-    ".",
-    "internal",
-    "internal/consistenthash",
-    "internal/hashtag",
-    "internal/pool",
-    "internal/proto"
-  ]
-  revision = "4021ace05686f632ff17fd824bbed229fc474cf8"
-  version = "v6.8.2"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/gocql/gocql"
-  packages = [
-    ".",
-    "internal/lru",
-    "internal/murmur",
-    "internal/streams"
-  ]
-  revision = "dd47639f787e8583e1a13e17e20a9f6c4332bc29"
-
 [[projects]]
   name = "github.com/gogo/protobuf"
   packages = [
@@ -242,12 +179,6 @@
   packages = ["proto"]
   revision = "c65a0412e71e8b9b3bfd22925720d23c0f054237"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/golang/snappy"
-  packages = ["."]
-  revision = "553a641470496b2327abcac10b36396bd98e45c9"
-
 [[projects]]
   name = "github.com/gorilla/context"
   packages = ["."]
@@ -260,65 +191,23 @@
   revision = "53c1911da2b537f792e7cafcb446b05ffe33b996"
   version = "v1.6.1"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/hailocab/go-hostpool"
-  packages = ["."]
-  revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478"
-
-[[projects]]
-  name = "github.com/hashicorp/consul"
-  packages = ["api"]
-  revision = "fb848fc48818f58690db09d14640513aa6bf3c02"
-  version = "v1.0.7"
-
 [[projects]]
   branch = "master"
   name = "github.com/hashicorp/go-cleanhttp"
   packages = ["."]
   revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/hashicorp/go-rootcerts"
-  packages = ["."]
-  revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
-
-[[projects]]
-  name = "github.com/hashicorp/serf"
-  packages = ["coordinate"]
-  revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
-  version = "v0.8.1"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/howeyc/crc16"
-  packages = ["."]
-  revision = "2b2a61e366a66d3efb279e46176e7291001e0354"
-
 [[projects]]
   name = "github.com/inconshreveable/mousetrap"
   packages = ["."]
   revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
   version = "v1.0"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/kr/pretty"
-  packages = ["."]
-  revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/kr/text"
-  packages = ["."]
-  revision = "7cafcd837844e784b526369c9bce262804aebc60"
-
 [[projects]]
   name = "github.com/ligato/cn-infra"
   packages = [
+    "agent",
     "config",
-    "core",
     "datasync",
     "datasync/kvdbsync",
     "datasync/kvdbsync/local",
@@ -326,39 +215,26 @@
     "datasync/resync",
     "datasync/syncbase",
     "db/keyval",
-    "db/keyval/consul",
     "db/keyval/etcd",
     "db/keyval/kvproto",
-    "db/keyval/redis",
-    "db/sql",
-    "db/sql/cassandra",
-    "flavors/connectors",
-    "flavors/local",
-    "flavors/rpc",
-    "health/probe",
     "health/statuscheck",
     "health/statuscheck/model/status",
     "idxmap",
     "idxmap/mem",
+    "infra",
     "logging",
-    "logging/logmanager",
     "logging/logrus",
     "logging/measure",
     "messaging",
-    "messaging/kafka",
-    "messaging/kafka/client",
-    "messaging/kafka/mux",
     "rpc/grpc",
     "rpc/prometheus",
     "rpc/rest",
     "servicelabel",
     "utils/addrs",
-    "utils/clienttls",
-    "utils/safeclose",
-    "utils/structs"
+    "utils/once",
+    "utils/safeclose"
   ]
-  revision = "22c14be88d97b2b8c38cce0a7141ea18bd042143"
-  version = "v1.4.1"
+  revision = "52432e9cff91cd15e5cd0001be7e1034253adf0a"
 
 [[projects]]
   branch = "master"
@@ -372,24 +248,12 @@
   packages = ["."]
   revision = "ef56447db6a068ad9e52bc54a1aff5fb9e1ed2dd"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/maraino/go-mock"
-  packages = ["."]
-  revision = "c0658195ada54274d527cb1b09ff1d31765509d6"
-
 [[projects]]
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
   revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
   version = "v1.0.0"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/mitchellh/go-homedir"
-  packages = ["."]
-  revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
-
 [[projects]]
   name = "github.com/namsral/flag"
   packages = ["."]
@@ -439,18 +303,6 @@
   revision = "baf6536d6259209c3edfa2b22237af82942d3dfa"
   version = "v0.1.1"
 
-[[projects]]
-  name = "github.com/pierrec/lz4"
-  packages = ["."]
-  revision = "2fcda4cb7018ce05a25959d2fe08c83e3329f169"
-  version = "v1.1"
-
-[[projects]]
-  name = "github.com/pierrec/xxHash"
-  packages = ["xxHash32"]
-  revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
-  version = "v0.1.1"
-
 [[projects]]
   name = "github.com/pkg/errors"
   packages = ["."]
@@ -493,12 +345,6 @@
   ]
   revision = "85fadb6e89903ef7cca6f6a804474cd5ea85b6e1"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/rcrowley/go-metrics"
-  packages = ["."]
-  revision = "e181e095bae94582363434144c61a9653aff6e50"
-
 [[projects]]
   name = "github.com/satori/go.uuid"
   packages = ["."]
@@ -550,12 +396,6 @@
   packages = ["."]
   revision = "be1fbeda19366dea804f00efff2dd73a1642fdcc"
 
-[[projects]]
-  branch = "master"
-  name = "github.com/willfaught/gockle"
-  packages = ["."]
-  revision = "4f254e1e0f0a12485963192ff605f61f1933e71f"
-
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"
@@ -641,12 +481,6 @@
   revision = "8050b9cbc271307e5a716a9d782803d09b0d6f2d"
   version = "v1.2.1"
 
-[[projects]]
-  name = "gopkg.in/inf.v0"
-  packages = ["."]
-  revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
-  version = "v0.9.0"
-
 [[projects]]
   branch = "v2"
   name = "gopkg.in/yaml.v2"
@@ -656,6 +490,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "8d88cea9e950184c84919c5a1e32a620023704ffca5326cfc681225ed2c6c19b"
+  inputs-digest = "14676c37735f9da673c5b437ebe2f1ddabdd1764b2e0bc2a98b756ec37073352"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index b4dc198baf..ea3908f531 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -36,7 +36,7 @@ required = [
 
 [[constraint]]
   name = "github.com/ligato/cn-infra"
-  version = "1.4.1"
+  revision = "52432e9cff91cd15e5cd0001be7e1034253adf0a"
 
 [[constraint]]
   branch = "master"
diff --git a/clientv1/linux/localclient/localclient_api.go b/clientv1/linux/localclient/localclient_api.go
index 9f585d101a..339e1b6fd1 100644
--- a/clientv1/linux/localclient/localclient_api.go
+++ b/clientv1/linux/localclient/localclient_api.go
@@ -15,18 +15,17 @@
 package localclient
 
 import (
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/cn-infra/datasync/kvdbsync/local"
 	"github.com/ligato/vpp-agent/clientv1/linux"
 	"github.com/ligato/vpp-agent/clientv1/linux/dbadapter"
 )
 
 // PluginID defines the name of Linux localclient plugin.
-const PluginID core.PluginName = "LinuxPlugin_LOCAL_CLIENT"
+//const PluginID core.PluginName = "LinuxPlugin_LOCAL_CLIENT"
 
 // DataResyncRequest allows creating a RESYNC request using convenient RESYNC
 // DSL and sending it locally through go channels (i.e. without using Data Store).
-func DataResyncRequest(caller core.PluginName) linuxclient.DataResyncDSL {
+func DataResyncRequest(caller string) linuxclient.DataResyncDSL {
 	return dbadapter.NewDataResyncDSL(local.NewProtoTxn(local.Get().PropagateResync),
 		nil /*no need to list anything*/)
 }
@@ -34,6 +33,6 @@ func DataResyncRequest(caller core.PluginName) linuxclient.DataResyncDSL {
 // DataChangeRequest allows creating Data Change request(s) using convenient
 // Data Change DSL and sending it locally through go channels (i.e. without using
 // Data Store).
-func DataChangeRequest(caller core.PluginName) linuxclient.DataChangeDSL {
+func DataChangeRequest(caller string) linuxclient.DataChangeDSL {
 	return dbadapter.NewDataChangeDSL(local.NewProtoTxn(local.Get().PropagateChanges))
 }
diff --git a/clientv1/vpp/localclient/localclient_api.go b/clientv1/vpp/localclient/localclient_api.go
index ed7ab820fd..28637716a7 100644
--- a/clientv1/vpp/localclient/localclient_api.go
+++ b/clientv1/vpp/localclient/localclient_api.go
@@ -15,18 +15,17 @@
 package localclient
 
 import (
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/cn-infra/datasync/kvdbsync/local"
 	"github.com/ligato/vpp-agent/clientv1/vpp"
 	"github.com/ligato/vpp-agent/clientv1/vpp/dbadapter"
 )
 
 // PluginID defines the name of VPP (vppplugin) localclient plugin.
-const PluginID core.PluginName = "DefaultVppPlugins_LOCAL_CLIENT"
+//const PluginID core.PluginName = "DefaultVppPlugins_LOCAL_CLIENT"
 
 // DataResyncRequest allows creating a RESYNC request using convenient RESYNC
 // DSL and sending it locally through go channels (i.e. without using Data Store).
-func DataResyncRequest(caller core.PluginName) vppclient.DataResyncDSL {
+func DataResyncRequest(caller string) vppclient.DataResyncDSL {
 	return dbadapter.NewDataResyncDSL(local.NewProtoTxn(local.Get().PropagateResync),
 		nil /*no need to list anything*/)
 }
@@ -34,6 +33,6 @@ func DataResyncRequest(caller core.PluginName) vppclient.DataResyncDSL {
 // DataChangeRequest allows creating Data Change request(s) using convenient
 // Data Change DSL and sending it locally through go channels (i.e. without using
 // Data Store).
-func DataChangeRequest(caller core.PluginName) vppclient.DataChangeDSL {
+func DataChangeRequest(caller string) vppclient.DataChangeDSL {
 	return dbadapter.NewDataChangeDSL(local.NewProtoTxn(local.Get().PropagateChanges))
 }
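[Reviewer note] With core.PluginName removed, localclient callers now identify themselves with a plain string. A minimal sketch of a consumer after this change; the Put/Send DSL methods come from the unchanged clientv1 interfaces and are assumed here, and "my-plugin" is a hypothetical caller name:

package example

import (
	localclient "github.com/ligato/vpp-agent/clientv1/linux/localclient"
	"github.com/ligato/vpp-agent/plugins/linux/model/interfaces"
)

// configureVeth sends one Linux interface config through the local
// go-channel transport; no Data Store is involved.
func configureVeth() error {
	// The caller used to be typed core.PluginName; any string now works.
	txn := localclient.DataChangeRequest("my-plugin")
	return txn.Put().
		LinuxInterface(&interfaces.LinuxInterfaces_Interface{Name: "veth1"}).
		Send().ReceiveReply()
}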
diff --git a/cmd/vpp-agent/main.go b/cmd/vpp-agent/main.go
index aa28305b0a..6140c20bff 100644
--- a/cmd/vpp-agent/main.go
+++ b/cmd/vpp-agent/main.go
@@ -19,24 +19,18 @@
 package main
 
 import (
 	"os"
 
-	"github.com/ligato/cn-infra/core"
+	"github.com/ligato/cn-infra/agent"
 	"github.com/ligato/cn-infra/logging"
 	log "github.com/ligato/cn-infra/logging/logrus"
-	flavor "github.com/ligato/vpp-agent/flavors/vpp"
 )
 
-// main is the main entry point into the VPP Agent. Firstly, a new CN-Infra
-// Agent (app) is created, using the set of plugins defined in vpp_flavor
-// (../../flavors/vpp). Secondly, the function calls EventLoopWithInterrupt()
-// which initializes and starts all plugins and then waits for the user
-// to terminate the VPP Agent process with SIGINT. All VPP Agent's work between
-// the initialization and termination is performed by the plugins.
 func main() {
-	agent := flavor.NewAgent()
+	p := NewVppAgent()
 
-	err := core.EventLoopWithInterrupt(agent, nil)
-	if err != nil {
-		os.Exit(1)
+	a := agent.NewAgent(agent.AllPlugins(p))
+
+	if err := a.Run(); err != nil {
+		log.DefaultLogger().Fatal(err)
 	}
 }
diff --git a/cmd/vpp-agent/vpp_agent.go b/cmd/vpp-agent/vpp_agent.go
new file mode 100644
index 0000000000..fb28d1e128
--- /dev/null
+++ b/cmd/vpp-agent/vpp_agent.go
@@ -0,0 +1,93 @@
+package main
+
+import (
+	"github.com/ligato/cn-infra/datasync"
+	"github.com/ligato/cn-infra/datasync/kvdbsync"
+	"github.com/ligato/cn-infra/datasync/kvdbsync/local"
+	"github.com/ligato/cn-infra/datasync/msgsync"
+	"github.com/ligato/cn-infra/datasync/resync"
+	"github.com/ligato/cn-infra/db/keyval/etcd"
+	"github.com/ligato/cn-infra/rpc/rest"
+	"github.com/ligato/vpp-agent/plugins/govppmux"
+	"github.com/ligato/vpp-agent/plugins/linux"
+	"github.com/ligato/vpp-agent/plugins/telemetry"
+	"github.com/ligato/vpp-agent/plugins/vpp"
+	"github.com/ligato/vpp-agent/plugins/vpp/rpc"
+)
+
+type VPPAgent struct {
+	GoVPP *govppmux.Plugin
+	Linux *linux.Plugin
+	VPP   *vpp.Plugin
+
+	IfStatePub      *msgsync.Plugin
+	GRPCSvcPlugin   *rpc.GRPCSvcPlugin
+	RESTAPIPlugin   *rest.Plugin
+	TelemetryPlugin *telemetry.Plugin
+}
+
+func NewVppAgent() *VPPAgent {
+	etcdDataSync := kvdbsync.NewPlugin(
+		kvdbsync.UseDeps(func(deps *kvdbsync.Deps) {
+			deps.KvPlugin = &etcd.DefaultPlugin
+			deps.ResyncOrch = &resync.DefaultPlugin
+		}),
+	)
+	watcher := &datasync.CompositeKVProtoWatcher{
+		Adapters: []datasync.KeyValProtoWatcher{
+			local.Get(),
+			etcdDataSync,
+		}}
+
+	/*govpp := govppmux.NewPlugin(
+		govppmux.UseDeps(govppmux.Deps{
+			Resync: resync.DefaultPlugin,
+		}),
+	)*/
+	govppPlugin := &govppmux.DefaultPlugin
+
+	var linuxAPI vpp.LinuxpluginAPI
+	vppPlugin := vpp.NewPlugin(
+		vpp.UseDeps(func(deps *vpp.Deps) {
+			deps.Linux = linuxAPI
+			deps.GoVppmux = govppPlugin
+			deps.Publish = etcdDataSync
+			deps.Watch = watcher
+			deps.DataSyncs = map[string]datasync.KeyProtoValWriter{
+				"etcd": etcdDataSync,
+			}
+		}),
+	)
+
+	linuxPlugin := linux.NewPlugin(
+		linux.UseDeps(func(deps *linux.Deps) {
+			deps.VPP = vppPlugin
+			deps.Watcher = watcher
+		}),
+	)
+	linuxAPI = linuxPlugin
+
+	return &VPPAgent{
+		GoVPP: govppPlugin,
+		Linux: linuxPlugin,
+		VPP:   vppPlugin,
+	}
+}
+
+func (VPPAgent) String() string {
+	return "vpp-agent"
+}
+
+func (VPPAgent) Init() error {
+	return nil
+}
+
+func (VPPAgent) AfterInit() error {
+	// Manually run resync at the very end
+	resync.DefaultPlugin.DoResync()
+	return nil
+}
+
+func (VPPAgent) Close() error {
+	return nil
+}
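[Reviewer note] The flavor packages are dropped in favor of plain constructors plus the new cn-infra agent package, so smaller agents can be composed by hand. A minimal sketch, under the assumption that agent.AllPlugins discovers the exported plugin fields of the top-level struct (suggested by the new plugin_lookup.go); whether the VPP plugin is functional without its watcher and publisher dependencies is not shown by this patch:

package main

import (
	"github.com/ligato/cn-infra/agent"
	log "github.com/ligato/cn-infra/logging/logrus"
	"github.com/ligato/vpp-agent/plugins/govppmux"
	"github.com/ligato/vpp-agent/plugins/vpp"
)

// VppOnlyAgent wires just GoVPP and the VPP configurator.
type VppOnlyAgent struct {
	GoVPP *govppmux.Plugin
	VPP   *vpp.Plugin
}

func (VppOnlyAgent) String() string { return "vpp-only-agent" }
func (VppOnlyAgent) Init() error    { return nil }
func (VppOnlyAgent) Close() error   { return nil }

func main() {
	govppPlugin := &govppmux.DefaultPlugin
	top := &VppOnlyAgent{
		GoVPP: govppPlugin,
		VPP: vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) {
			deps.GoVppmux = govppPlugin
		})),
	}
	// AllPlugins walks the plugin fields of top; Run blocks until
	// the agent is interrupted or a plugin fails.
	a := agent.NewAgent(agent.AllPlugins(top))
	if err := a.Run(); err != nil {
		log.DefaultLogger().Fatal(err)
	}
}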
diff --git a/flavors/local/local_flavor.go b/flavors/local/local_flavor.go
index 18ee3971ff..13e21d4198 100644
--- a/flavors/local/local_flavor.go
+++ b/flavors/local/local_flavor.go
@@ -15,18 +15,7 @@
 // Package local defines flavor used for VPP agents managed only locally.
 package local
 
-import (
-	"github.com/ligato/cn-infra/core"
-	"github.com/ligato/cn-infra/flavors/local"
-
-	"github.com/ligato/cn-infra/datasync"
-	local_sync "github.com/ligato/cn-infra/datasync/kvdbsync/local"
-	"github.com/ligato/vpp-agent/clientv1/linux/localclient"
-	"github.com/ligato/vpp-agent/plugins/govppmux"
-	"github.com/ligato/vpp-agent/plugins/linux"
-	"github.com/ligato/vpp-agent/plugins/vpp"
-)
-
+/*
 // NewAgent returns a new instance of the Agent with plugins.
 // It is an alias for core.NewAgent() to implicit use of the FlavorVppLocal
 func NewAgent(opts ...core.Option) *core.Agent {
@@ -108,3 +97,4 @@ func (opt *withPluginsOpt) Plugins(flavors ...core.Flavor) []*core.NamedPlugin {
 	panic("wrong usage of vppLocal.WithPlugin() for other than FlavorVppLocal")
 }
+*/
diff --git a/flavors/vpp/vpp_flavor.go b/flavors/vpp/vpp_flavor.go
index b43404ed2c..750c2d6233 100644
--- a/flavors/vpp/vpp_flavor.go
+++ b/flavors/vpp/vpp_flavor.go
@@ -15,24 +15,7 @@
 // Package vpp defines the standard flavor used for full-featured VPP agents.
 package vpp
 
-import (
-	"sync"
-
-	"github.com/ligato/cn-infra/core"
-	"github.com/ligato/cn-infra/datasync"
-	local_sync "github.com/ligato/cn-infra/datasync/kvdbsync/local"
-	"github.com/ligato/cn-infra/datasync/msgsync"
-	"github.com/ligato/cn-infra/flavors/connectors"
-	"github.com/ligato/cn-infra/flavors/local"
-	"github.com/ligato/cn-infra/flavors/rpc"
-	"github.com/ligato/vpp-agent/plugins/govppmux"
-	"github.com/ligato/vpp-agent/plugins/linux"
-	"github.com/ligato/vpp-agent/plugins/rest"
-	"github.com/ligato/vpp-agent/plugins/telemetry"
-	"github.com/ligato/vpp-agent/plugins/vpp"
-	rpcsvc "github.com/ligato/vpp-agent/plugins/vpp/rpc"
-)
-
+/*
 // kafkaIfStateTopic is the topic where interface state changes are published.
 const kafkaIfStateTopic = "if_state"
 
@@ -96,10 +79,10 @@ func (f *Flavor) Inject() bool {
 		&f.AllConnectorsFlavor.ConsulDataSync,
 	}}
 
-	/* note: now configurable with `status-publishers` in vppplugin
-	f.VPP.Deps.PublishStatistics = &datasync.CompositeKVProtoWriter{Adapters: []datasync.KeyProtoValWriter{
-		&f.AllConnectorsFlavor.ETCDDataSync, &f.AllConnectorsFlavor.RedisDataSync},
-	}*/
+	// note: now configurable with `status-publishers` in vppplugin
+	// f.VPP.Deps.PublishStatistics = &datasync.CompositeKVProtoWriter{Adapters: []datasync.KeyProtoValWriter{
+	//	&f.AllConnectorsFlavor.ETCDDataSync, &f.AllConnectorsFlavor.RedisDataSync},
+	//}
 	f.VPP.Deps.DataSyncs = map[string]datasync.KeyProtoValWriter{
 		"etcd":  &f.AllConnectorsFlavor.ETCDDataSync,
 		"redis": &f.AllConnectorsFlavor.RedisDataSync,
@@ -191,3 +174,4 @@ func (opt *withPluginsOpt) Plugins(flavors ...core.Flavor) []*core.NamedPlugin {
 	panic("wrong usage of vpp.WithPlugin() for other than Flavor")
 }
+*/
diff --git a/idxvpp/api.go b/idxvpp/api.go
index 23bed6baef..3f4452860e 100644
--- a/idxvpp/api.go
+++ b/idxvpp/api.go
@@ -17,7 +17,6 @@ package idxvpp
 import (
 	"errors"
 
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/cn-infra/idxmap"
 )
 
@@ -142,5 +141,5 @@
 	//	ifIdxEv.Done()
 	//	...
 	// }
-	Watch(subscriber core.PluginName, callback func(NameToIdxDto))
+	Watch(subscriber string, callback func(NameToIdxDto))
 }
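[Reviewer note] Subscriber identity in the name-to-index mappings is now a plain string as well. A minimal sketch of receiving mapping changes through a channel with the ToChan helper, the same pattern the persist package uses below; the mapping instance, subscriber name, and buffer size are assumptions:

package example

import (
	"github.com/ligato/vpp-agent/idxvpp"
	"github.com/ligato/vpp-agent/idxvpp/nametoidx"
)

// watchMapping subscribes to changes of any NameToIdx mapping.
// The subscriber argument used to be typed core.PluginName.
func watchMapping(mapping idxvpp.NameToIdx) chan idxvpp.NameToIdxDto {
	changes := make(chan idxvpp.NameToIdxDto, 100)
	mapping.Watch("my-plugin", nametoidx.ToChan(changes))
	return changes
}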
diff --git a/idxvpp/nametoidx/inmemory_name_to_idx_mapping.go b/idxvpp/nametoidx/inmemory_name_to_idx_mapping.go
index 32769bb973..ef8d1f938b 100644
--- a/idxvpp/nametoidx/inmemory_name_to_idx_mapping.go
+++ b/idxvpp/nametoidx/inmemory_name_to_idx_mapping.go
@@ -18,9 +18,9 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/cn-infra/idxmap"
 	"github.com/ligato/cn-infra/idxmap/mem"
+	"github.com/ligato/cn-infra/infra"
 	"github.com/ligato/cn-infra/logging"
 	"github.com/ligato/cn-infra/logging/logrus"
 	"github.com/ligato/vpp-agent/idxvpp"
@@ -142,7 +142,7 @@ func (mem *nameToIdxMem) ListNames() (names []string) {
 // Watch starts monitoring a change in the mapping. When yhe change occurs, the callback is called.
 // ToChan utility can be used to receive changes through channel.
-func (mem *nameToIdxMem) Watch(subscriber core.PluginName, callback func(idxvpp.NameToIdxDto)) {
+func (mem *nameToIdxMem) Watch(subscriber string, callback func(idxvpp.NameToIdxDto)) {
 	watcher := func(dto idxmap.NamedMappingGenericEvent) {
 		internalMeta, ok := dto.Value.(*nameToIdxMeta)
 		if !ok {
@@ -156,7 +156,7 @@ func (mem *nameToIdxMem) Watch(subscriber core.PluginName, callback func(idxvpp.
 		}
 		callback(msg)
 	}
-	mem.internal.Watch(subscriber, watcher)
+	mem.internal.Watch(infra.PluginName(subscriber), watcher)
 }
 
 // ToChan is an utility that allows to receive notification through a channel.
diff --git a/idxvpp/persist/persistent_name_mapping.go b/idxvpp/persist/persistent_name_mapping.go
index ac5175fc56..5d45bfad26 100644
--- a/idxvpp/persist/persistent_name_mapping.go
+++ b/idxvpp/persist/persistent_name_mapping.go
@@ -26,7 +26,6 @@
 	"sync"
 	"time"
 
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/cn-infra/logging"
 	log "github.com/ligato/cn-infra/logging/logrus"
 	"github.com/ligato/vpp-agent/idxvpp"
@@ -71,7 +70,7 @@
 		return err
 	}
 
-	idxMap.Watch(core.PluginName("idxpersist"), nametoidx.ToChan(changes))
+	idxMap.Watch("idxpersist", nametoidx.ToChan(changes))
 
 	err = persist.loadIdxMapFile(loadedFromFile)
 	if err != nil {
diff --git a/plugins/govppmux/options.go b/plugins/govppmux/options.go
new file mode 100644
index 0000000000..b217a6f1f6
--- /dev/null
+++ b/plugins/govppmux/options.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package govppmux
+
+import (
+	"github.com/ligato/cn-infra/config"
+	"github.com/ligato/cn-infra/health/statuscheck"
+	"github.com/ligato/cn-infra/logging"
+)
+
+type Plugin = GOVPPPlugin
+
+// DefaultPlugin is the default instance of Plugin.
+var DefaultPlugin = *NewPlugin()
+
+// NewPlugin creates a new Plugin with the provided Options.
+func NewPlugin(opts ...Option) *Plugin {
+	p := &Plugin{}
+
+	p.PluginName = "govpp"
+	p.StatusCheck = &statuscheck.DefaultPlugin
+
+	for _, o := range opts {
+		o(p)
+	}
+
+	if p.Deps.Log == nil {
+		p.Deps.Log = logging.ForPlugin(p.String())
+	}
+	if p.Deps.PluginConfig == nil {
+		p.Deps.PluginConfig = config.ForPlugin(p.String())
+	}
+
+	return p
+}
+
+// Option is a function that acts on a Plugin to inject dependencies or configuration.
+type Option func(*Plugin)
+
+// UseDeps returns an Option that can inject custom dependencies.
+func UseDeps(cb func(*Deps)) Option {
+	return func(p *Plugin) {
+		cb(&p.Deps)
+	}
+}
diff --git a/plugins/govppmux/plugin_impl_govppmux.go b/plugins/govppmux/plugin_impl_govppmux.go
index c91c09f69e..0d3217ba7e 100644
--- a/plugins/govppmux/plugin_impl_govppmux.go
+++ b/plugins/govppmux/plugin_impl_govppmux.go
@@ -26,8 +26,8 @@ import (
 	govpp "git.fd.io/govpp.git/core"
 
 	"github.com/ligato/cn-infra/datasync/resync"
-	"github.com/ligato/cn-infra/flavors/local"
 	"github.com/ligato/cn-infra/health/statuscheck"
+	"github.com/ligato/cn-infra/infra"
 	"github.com/ligato/cn-infra/logging"
 	"github.com/ligato/cn-infra/logging/logrus"
 	"github.com/ligato/vpp-agent/plugins/govppmux/vppcalls"
@@ -39,7 +39,7 @@ func init() {
 
 // GOVPPPlugin implements the govppmux plugin interface.
 type GOVPPPlugin struct {
-	Deps // Inject.
+	Deps
 
 	vppConn    *govpp.Connection
 	vppAdapter adapter.VppAdapter
@@ -58,8 +58,9 @@ type GOVPPPlugin struct {
 // Deps groups injected dependencies of plugin
 // so that they do not mix with other plugin fields.
 type Deps struct {
-	local.PluginInfraDeps // inject
-	Resync *resync.Plugin
+	infra.Deps
+	StatusCheck statuscheck.PluginStatusWriter
+	Resync      *resync.Plugin
 }
 
 // Config groups the configurable parameter of GoVpp.
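[Reviewer note] The options file above is the replacement for flavor-based injection: construction, defaults, and dependency overrides all happen in NewPlugin. A minimal sketch of both ways to obtain the plugin; the Resync override mirrors the commented-out block in cmd/vpp-agent/vpp_agent.go and is purely illustrative:

package example

import (
	"github.com/ligato/cn-infra/datasync/resync"
	"github.com/ligato/vpp-agent/plugins/govppmux"
)

func goVppInstances() (*govppmux.Plugin, *govppmux.Plugin) {
	// Shared singleton with stock dependencies.
	shared := &govppmux.DefaultPlugin

	// Fresh instance with an injected resync orchestrator.
	custom := govppmux.NewPlugin(
		govppmux.UseDeps(func(deps *govppmux.Deps) {
			deps.Resync = &resync.DefaultPlugin
		}),
	)
	return shared, custom
}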
diff --git a/plugins/linux/data_resync.go b/plugins/linux/data_resync.go
index 2d86fff851..03e47caca6 100644
--- a/plugins/linux/data_resync.go
+++ b/plugins/linux/data_resync.go
@@ -151,7 +151,7 @@ func resyncAppendRoutes(resyncData datasync.KeyValIterator, req *DataResyncReq)
 func (plugin *Plugin) subscribeWatcher() (err error) {
 	plugin.Log.Debug("subscribeWatcher begin")
 
-	plugin.ifIndexes.WatchNameToIdx(plugin.PluginName, plugin.ifIndexesWatchChan)
+	plugin.ifIndexes.WatchNameToIdx(plugin.PluginName.String(), plugin.ifIndexesWatchChan)
 
 	plugin.watchDataReg, err = plugin.Watcher.
 		Watch("linuxplugin", plugin.changeChan, plugin.resyncChan, interfaces.InterfaceKeyPrefix(),
diff --git a/plugins/linux/ifplugin/ifaceidx/linux_if_index.go b/plugins/linux/ifplugin/ifaceidx/linux_if_index.go
index 256179be6d..0a018d7119 100644
--- a/plugins/linux/ifplugin/ifaceidx/linux_if_index.go
+++ b/plugins/linux/ifplugin/ifaceidx/linux_if_index.go
@@ -15,7 +15,6 @@
 package ifaceidx
 
 import (
-	"github.com/ligato/cn-infra/core"
 	log "github.com/ligato/cn-infra/logging/logrus"
 	"github.com/ligato/vpp-agent/idxvpp"
 	"github.com/ligato/vpp-agent/idxvpp/nametoidx"
@@ -57,7 +56,7 @@
 	LookupNameByHostIfName(hostIfName string) []string
 
 	// WatchNameToIdx allows to subscribe for watching changes in linuxIfIndex mapping.
-	WatchNameToIdx(subscriber core.PluginName, pluginChannel chan LinuxIfIndexDto)
+	WatchNameToIdx(subscriber string, pluginChannel chan LinuxIfIndexDto)
 }
 
 // LinuxIfIndexRW is mapping between software interface indices (used internally in VPP)
@@ -170,7 +169,7 @@ func (linuxIfIdx *linuxIfIndex) UnregisterName(name string) (idx uint32, metadat
 }
 
 // WatchNameToIdx allows to subscribe for watching changes in linuxIfIndex mapping.
-func (linuxIfIdx *linuxIfIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan LinuxIfIndexDto) {
+func (linuxIfIdx *linuxIfIndex) WatchNameToIdx(subscriber string, pluginChannel chan LinuxIfIndexDto) {
 	ch := make(chan idxvpp.NameToIdxDto)
 	linuxIfIdx.mapping.Watch(subscriber, nametoidx.ToChan(ch))
 	go func() {
diff --git a/plugins/linux/l3plugin/l3idx/linux_l3_arp_index.go b/plugins/linux/l3plugin/l3idx/linux_l3_arp_index.go
index 935fcdfeba..6215b1b4e2 100644
--- a/plugins/linux/l3plugin/l3idx/linux_l3_arp_index.go
+++ b/plugins/linux/l3plugin/l3idx/linux_l3_arp_index.go
@@ -15,7 +15,6 @@
 package l3idx
 
 import (
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/vpp-agent/idxvpp"
 	"github.com/ligato/vpp-agent/idxvpp/nametoidx"
 	"github.com/ligato/vpp-agent/plugins/linux/model/l3"
@@ -38,7 +37,7 @@
 	LookupNameByHostIfName(hostIfName string) []string
 
 	// WatchNameToIdx allows to subscribe for watching changes in linuxIfIndex mapping
-	WatchNameToIdx(subscriber core.PluginName, pluginChannel chan LinuxARPIndexDto)
+	WatchNameToIdx(subscriber string, pluginChannel chan LinuxARPIndexDto)
 }
 
 // LinuxARPIndexRW is mapping between software ARP indexes (used internally in VPP)
@@ -111,7 +110,7 @@ func (linuxArpIndex *linuxArpIndex) UnregisterName(name string) (idx uint32, met
 }
 
 // WatchNameToIdx allows to subscribe for watching changes in linuxIfIndex mapping
-func (linuxArpIndex *linuxArpIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan LinuxARPIndexDto) {
+func (linuxArpIndex *linuxArpIndex) WatchNameToIdx(subscriber string, pluginChannel chan LinuxARPIndexDto) {
 	ch := make(chan idxvpp.NameToIdxDto)
 	linuxArpIndex.mapping.Watch(subscriber, nametoidx.ToChan(ch))
 	go func() {
diff --git a/plugins/linux/l3plugin/l3idx/linux_l3_route_index.go b/plugins/linux/l3plugin/l3idx/linux_l3_route_index.go
index 00b76a3c9b..8907086023 100644
--- a/plugins/linux/l3plugin/l3idx/linux_l3_route_index.go
+++ b/plugins/linux/l3plugin/l3idx/linux_l3_route_index.go
@@ -17,7 +17,6 @@ package l3idx
 import (
 	"net"
 
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/vpp-agent/idxvpp"
 	"github.com/ligato/vpp-agent/idxvpp/nametoidx"
 	"github.com/ligato/vpp-agent/plugins/linux/model/l3"
@@ -44,7 +43,7 @@
 	LookupRouteByIP(ns *l3.LinuxStaticRoutes_Route_Namespace, ipAddress string) (*l3.LinuxStaticRoutes_Route, error)
 
 	// WatchNameToIdx allows to subscribe for watching changes in linuxIfIndex mapping
-	WatchNameToIdx(subscriber core.PluginName, pluginChannel chan LinuxRouteIndexDto)
+	WatchNameToIdx(subscriber string, pluginChannel chan LinuxRouteIndexDto)
 }
 
 // LinuxRouteIndexRW is mapping between software route indexes (used internally in VPP)
@@ -160,7 +159,7 @@ func (linuxRouteIndex *linuxRouteIndex) UnregisterName(name string) (idx uint32,
 }
 
 // WatchNameToIdx allows to subscribe for watching changes in linuxIfIndex mapping
-func (linuxRouteIndex *linuxRouteIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan LinuxRouteIndexDto) {
+func (linuxRouteIndex *linuxRouteIndex) WatchNameToIdx(subscriber string, pluginChannel chan LinuxRouteIndexDto) {
 	ch := make(chan idxvpp.NameToIdxDto)
 	linuxRouteIndex.mapping.Watch(subscriber, nametoidx.ToChan(ch))
 	go func() {
"github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/flavors/local" + "github.com/ligato/cn-infra/health/statuscheck" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging/measure" + "github.com/ligato/cn-infra/servicelabel" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/linux/ifplugin" @@ -77,10 +79,13 @@ type Plugin struct { // Deps groups injected dependencies of plugin // so that they do not mix with other plugin fields. type Deps struct { - local.PluginInfraDeps // injected - Watcher datasync.KeyValProtoWatcher // injected - VPP *vpp.Plugin - WatchEventsMutex *sync.Mutex + infra.Deps + StatusCheck statuscheck.PluginStatusWriter + ServiceLabel servicelabel.ReaderAPI + + Watcher datasync.KeyValProtoWatcher // injected + VPP *vpp.Plugin + WatchEventsMutex *sync.Mutex } // LinuxConfig holds the linuxplugin configuration. @@ -110,7 +115,7 @@ func (plugin *Plugin) GetLinuxRouteIndexes() l3idx.LinuxRouteIndex { // InjectVppIfIndexes injects VPP interfaces mapping into Linux plugin func (plugin *Plugin) InjectVppIfIndexes(indexes ifaceVPP.SwIfIndex) { plugin.vppIfIndexes = indexes - plugin.vppIfIndexes.WatchNameToIdx(plugin.PluginName, plugin.vppIfIndexesWatchChan) + plugin.vppIfIndexes.WatchNameToIdx(plugin.PluginName.String(), plugin.vppIfIndexesWatchChan) } // Init gets handlers for ETCD and Kafka and delegates them to ifConfigurator. @@ -239,7 +244,7 @@ func (plugin *Plugin) initL3() error { func (plugin *Plugin) retrieveLinuxConfig() (*LinuxConfig, error) { config := &LinuxConfig{} - found, err := plugin.PluginInfraDeps.GetValue(config) + found, err := plugin.PluginConfig.GetValue(config) if !found { plugin.Log.Debug("Linuxplugin config not found") return nil, nil diff --git a/plugins/linux/options.go b/plugins/linux/options.go new file mode 100644 index 0000000000..ed990fb674 --- /dev/null +++ b/plugins/linux/options.go @@ -0,0 +1,57 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "github.com/ligato/cn-infra/config" + "github.com/ligato/cn-infra/health/statuscheck" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/servicelabel" +) + +// DefaultPlugin is default instance of Plugin +//var DefaultPlugin = NewPlugin() + +// NewPlugin creates a new Plugin with the provides Options +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "linux" + p.StatusCheck = &statuscheck.DefaultPlugin + p.ServiceLabel = &servicelabel.DefaultPlugin + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + if p.Deps.PluginConfig == nil { + p.Deps.PluginConfig = config.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that acts on a Plugin to inject Dependencies or configuration +type Option func(*Plugin) + +// UseDeps returns Option that can inject custom dependencies. 
+func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 52fd42df19..c22acb95ac 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -17,7 +17,7 @@ package rest import ( "fmt" - "github.com/ligato/cn-infra/flavors/local" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/vpp-agent/plugins/govppmux" ) @@ -35,7 +35,7 @@ type Plugin struct { // Deps represents dependencies of Rest Plugin type Deps struct { - local.PluginInfraDeps + infra.Deps HTTPHandlers rest.HTTPHandlers GoVppmux govppmux.API } diff --git a/plugins/telemetry/config.go b/plugins/telemetry/config.go index 35a0eb3487..3fe7eee9c5 100644 --- a/plugins/telemetry/config.go +++ b/plugins/telemetry/config.go @@ -27,7 +27,7 @@ type Config struct { // getConfig returns telemetry plugin file configuration if exists func (p *Plugin) getConfig() (*Config, error) { config := &Config{} - found, err := p.PluginInfraDeps.GetValue(config) + found, err := p.PluginConfig.GetValue(config) if !found { p.Log.Debug("Telemetry config not found") return nil, nil diff --git a/plugins/telemetry/telemetry.go b/plugins/telemetry/telemetry.go index 5e52037390..daf082c202 100644 --- a/plugins/telemetry/telemetry.go +++ b/plugins/telemetry/telemetry.go @@ -19,8 +19,9 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/flavors/local" + "github.com/ligato/cn-infra/infra" prom "github.com/ligato/cn-infra/rpc/prometheus" + "github.com/ligato/cn-infra/servicelabel" "github.com/ligato/vpp-agent/plugins/govppmux" "github.com/ligato/vpp-agent/plugins/govppmux/vppcalls" "github.com/prometheus/client_golang/prometheus" @@ -105,7 +106,8 @@ type Plugin struct { // Deps represents dependencies of Telemetry Plugin type Deps struct { - local.PluginInfraDeps + infra.Deps + ServiceLabel servicelabel.ReaderAPI GoVppmux govppmux.API Prometheus prom.API diff --git a/plugins/vpp/aclplugin/aclidx/aclidx.go b/plugins/vpp/aclplugin/aclidx/aclidx.go index 3e046403bd..6cb62001d3 100644 --- a/plugins/vpp/aclplugin/aclidx/aclidx.go +++ b/plugins/vpp/aclplugin/aclidx/aclidx.go @@ -15,7 +15,6 @@ package aclidx import ( - "github.com/ligato/cn-infra/core" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" acl_model "github.com/ligato/vpp-agent/plugins/vpp/model/acl" @@ -34,7 +33,7 @@ type AclIndex interface { LookupName(idx uint32) (name string, metadata *acl_model.AccessLists_Acl, exists bool) // WatchNameToIdx allows to subscribe for watching changes in aclIndex mapping. - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan AclIdxDto) + WatchNameToIdx(subscriber string, pluginChannel chan AclIdxDto) } // AclIndexRW is mapping between ACL indices (used internally in VPP) and ACL names. @@ -117,7 +116,7 @@ func (acl *aclIndex) castMetadata(meta interface{}) *acl_model.AccessLists_Acl { } // WatchNameToIdx allows to subscribe for watching changes in swIfIndex mapping. 
-func (acl *aclIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan AclIdxDto) { +func (acl *aclIndex) WatchNameToIdx(subscriber string, pluginChannel chan AclIdxDto) { ch := make(chan idxvpp.NameToIdxDto) acl.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/data_resync.go b/plugins/vpp/data_resync.go index e056517254..2eb68e245e 100644 --- a/plugins/vpp/data_resync.go +++ b/plugins/vpp/data_resync.go @@ -15,13 +15,11 @@ package vpp import ( + "fmt" "strconv" "strings" - "time" - "fmt" - "github.com/ligato/cn-infra/datasync" "github.com/ligato/cn-infra/logging" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" @@ -745,12 +743,12 @@ func appendResyncSR(resyncData datasync.KeyValIterator, req *DataResyncReq) (num // All registration for above channel select (it ensures proper order during initialization) are put here. func (plugin *Plugin) subscribeWatcher() (err error) { plugin.Log.Debug("subscribeWatcher begin") - plugin.swIfIndexes.WatchNameToIdx(plugin.PluginName, plugin.ifIdxWatchCh) + plugin.swIfIndexes.WatchNameToIdx(plugin.PluginName.String(), plugin.ifIdxWatchCh) plugin.Log.Debug("swIfIndexes watch registration finished") - plugin.bdIndexes.WatchNameToIdx(plugin.PluginName, plugin.bdIdxWatchCh) + plugin.bdIndexes.WatchNameToIdx(plugin.PluginName.String(), plugin.bdIdxWatchCh) plugin.Log.Debug("bdIndexes watch registration finished") if plugin.linuxIfIndexes != nil { - plugin.linuxIfIndexes.WatchNameToIdx(plugin.PluginName, plugin.linuxIfIdxWatchCh) + plugin.linuxIfIndexes.WatchNameToIdx(plugin.PluginName.String(), plugin.linuxIfIdxWatchCh) plugin.Log.Debug("linuxIfIndexes watch registration finished") } diff --git a/plugins/vpp/ifplugin/ifaceidx/dhcp_index.go b/plugins/vpp/ifplugin/ifaceidx/dhcp_index.go index d7adad5836..c588883526 100644 --- a/plugins/vpp/ifplugin/ifaceidx/dhcp_index.go +++ b/plugins/vpp/ifplugin/ifaceidx/dhcp_index.go @@ -15,7 +15,6 @@ package ifaceidx import ( - "github.com/ligato/cn-infra/core" log "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" @@ -44,7 +43,7 @@ type DhcpIndex interface { LookupName(idx uint32) (name string, metadata *DHCPSettings, exists bool) // WatchNameToIdx allows to subscribe for watching changes in DhcpIndex mapping. - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan DhcpIdxDto) + WatchNameToIdx(subscriber string, pluginChannel chan DhcpIdxDto) } // DhcpIndexRW is mapping between software interface names, indices @@ -132,7 +131,7 @@ func (dhcp *dhcpIndex) LookupName(idx uint32) (name string, metadata *DHCPSettin } // WatchNameToIdx allows to subscribe for watching changes in dhcpIndex mapping. 
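The same string-subscriber signature change repeats across the index packages in this patch. A minimal sketch of how a consumer subscribes after the change, using the ACL index as an example (the helper name, the "myplugin" label, and the channel capacity are illustrative assumptions; dto.Name and dto.Done() follow the NameToIdxDto conventions this repo's watchers already use):

package example

import (
	"log"

	"github.com/ligato/vpp-agent/plugins/vpp/aclplugin/aclidx"
)

// watchACLChanges subscribes for ACL name-to-index mapping changes using
// the new plain-string subscriber identifier (previously core.PluginName).
func watchACLChanges(aclIndexes aclidx.AclIndex) {
	aclIdxChan := make(chan aclidx.AclIdxDto, 100)
	aclIndexes.WatchNameToIdx("myplugin", aclIdxChan)

	go func() {
		for dto := range aclIdxChan {
			log.Printf("ACL mapping change: %s", dto.Name)
			dto.Done() // acknowledge the notification
		}
	}()
}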
-func (dhcp *dhcpIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan DhcpIdxDto) { +func (dhcp *dhcpIndex) WatchNameToIdx(subscriber string, pluginChannel chan DhcpIdxDto) { ch := make(chan idxvpp.NameToIdxDto) dhcp.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/ifplugin/ifaceidx/sw_if_index.go b/plugins/vpp/ifplugin/ifaceidx/sw_if_index.go index f5b523c8c0..f1d87c93de 100644 --- a/plugins/vpp/ifplugin/ifaceidx/sw_if_index.go +++ b/plugins/vpp/ifplugin/ifaceidx/sw_if_index.go @@ -15,7 +15,6 @@ package ifaceidx import ( - "github.com/ligato/cn-infra/core" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" @@ -37,7 +36,7 @@ type SwIfIndex interface { LookupNameByIP(ip string) []string // WatchNameToIdx allows to subscribe for watching changes in swIfIndex mapping. - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan SwIfIdxDto) + WatchNameToIdx(subscriber string, pluginChannel chan SwIfIdxDto) } // SwIfIndexRW is mapping between software interface indices @@ -153,7 +152,7 @@ func (swi *swIfIndex) castMetadata(meta interface{}) *intf.Interfaces_Interface } // WatchNameToIdx allows to subscribe for watching changes in swIfIndex mapping. -func (swi *swIfIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan SwIfIdxDto) { +func (swi *swIfIndex) WatchNameToIdx(subscriber string, pluginChannel chan SwIfIdxDto) { ch := make(chan idxvpp.NameToIdxDto) swi.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/ifplugin/interface_state.go b/plugins/vpp/ifplugin/interface_state.go index 0ddebd8576..004291fc64 100644 --- a/plugins/vpp/ifplugin/interface_state.go +++ b/plugins/vpp/ifplugin/interface_state.go @@ -15,6 +15,7 @@ package ifplugin import ( + "bytes" "context" "fmt" "net" @@ -22,10 +23,7 @@ import ( "sync" "time" - "bytes" - govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/plugins/govppmux" @@ -106,7 +104,7 @@ func (plugin *InterfaceStateUpdater) Init(logger logging.PluginLogger, goVppMux } plugin.swIdxChan = make(chan ifaceidx.SwIfIdxDto, 100) - swIfIndexes.WatchNameToIdx(core.PluginName("ifplugin_ifstate"), plugin.swIdxChan) + swIfIndexes.WatchNameToIdx("ifplugin_ifstate", plugin.swIdxChan) plugin.notifChan = notifChan // Create child context diff --git a/plugins/vpp/l2plugin/bd_state.go b/plugins/vpp/l2plugin/bd_state.go index d9d03478d1..72bf312400 100644 --- a/plugins/vpp/l2plugin/bd_state.go +++ b/plugins/vpp/l2plugin/bd_state.go @@ -20,7 +20,6 @@ import ( "time" govppapi "git.fd.io/govpp.git/api" - "github.com/ligato/cn-infra/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/vpp-agent/plugins/govppmux" l2_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" @@ -84,7 +83,7 @@ func (plugin *BridgeDomainStateUpdater) Init(logger logging.PluginLogger, goVppM // Name-to-index watcher plugin.bdIdxChan = make(chan l2idx.BdChangeDto, 100) - bdIndexes.WatchNameToIdx(core.PluginName("bdplugin_bdstate"), plugin.bdIdxChan) + bdIndexes.WatchNameToIdx("bdplugin_bdstate", plugin.bdIdxChan) var childCtx context.Context childCtx, plugin.cancel = context.WithCancel(ctx) diff --git a/plugins/vpp/l2plugin/l2idx/bd_index.go b/plugins/vpp/l2plugin/l2idx/bd_index.go index caae04d35c..73344b1562 100644 --- a/plugins/vpp/l2plugin/l2idx/bd_index.go +++ 
b/plugins/vpp/l2plugin/l2idx/bd_index.go @@ -15,7 +15,6 @@ package l2idx import ( - "github.com/ligato/cn-infra/core" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" @@ -39,7 +38,7 @@ type BDIndex interface { LookupConfiguredIfsForBd(bdName string) ([]string, bool) // WatchNameToIdx allows to subscribe for watching changes in bdIndex mapping - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan BdChangeDto) + WatchNameToIdx(subscriber string, pluginChannel chan BdChangeDto) } // BDIndexRW is mapping between indices (used internally in VPP) and Bridge Domain names. @@ -191,7 +190,7 @@ func (bdi *bdIndex) LookupConfiguredIfsForBd(bdName string) ([]string, bool) { } // WatchNameToIdx allows to subscribe for watching changes in bdIndex mapping. -func (bdi *bdIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan BdChangeDto) { +func (bdi *bdIndex) WatchNameToIdx(subscriber string, pluginChannel chan BdChangeDto) { ch := make(chan idxvpp.NameToIdxDto) bdi.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/l2plugin/l2idx/fib_index.go b/plugins/vpp/l2plugin/l2idx/fib_index.go index 3d13634e31..82ceb7f04a 100644 --- a/plugins/vpp/l2plugin/l2idx/fib_index.go +++ b/plugins/vpp/l2plugin/l2idx/fib_index.go @@ -15,7 +15,6 @@ package l2idx import ( - "github.com/ligato/cn-infra/core" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" @@ -33,7 +32,7 @@ type FIBIndex interface { LookupName(idx uint32) (name string, metadata *l2.FibTable_FibEntry, exists bool) // WatchNameToIdx allows to subscribe for watching changes in fibIndex mapping - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan FibChangeDto) + WatchNameToIdx(subscriber string, pluginChannel chan FibChangeDto) } // FIBIndexRW is mapping between indices (used internally in VPP) and FIB entries. @@ -116,7 +115,7 @@ func (fib *fibIndex) LookupName(idx uint32) (name string, metadata *l2.FibTable_ } // WatchNameToIdx allows to subscribe for watching changes in fibIndex mapping. -func (fib *fibIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan FibChangeDto) { +func (fib *fibIndex) WatchNameToIdx(subscriber string, pluginChannel chan FibChangeDto) { ch := make(chan idxvpp.NameToIdxDto) fib.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/l2plugin/l2idx/xc_index.go b/plugins/vpp/l2plugin/l2idx/xc_index.go index 12ed17318c..f1dc507301 100644 --- a/plugins/vpp/l2plugin/l2idx/xc_index.go +++ b/plugins/vpp/l2plugin/l2idx/xc_index.go @@ -15,7 +15,6 @@ package l2idx import ( - "github.com/ligato/cn-infra/core" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" @@ -33,7 +32,7 @@ type XcIndex interface { LookupName(idx uint32) (name string, metadata *l2.XConnectPairs_XConnectPair, exists bool) // WatchNameToIdx allows to subscribe for watching changes in xcIndex mapping - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan XcChangeDto) + WatchNameToIdx(subscriber string, pluginChannel chan XcChangeDto) } // XcIndexRW is mapping between indices (used internally in VPP) and cross connect entries. @@ -116,7 +115,7 @@ func (xc *xcIndex) LookupName(idx uint32) (name string, metadata *l2.XConnectPai } // WatchNameToIdx allows to subscribe for watching changes in xcIndex mapping. 
-func (xc *xcIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan XcChangeDto) { +func (xc *xcIndex) WatchNameToIdx(subscriber string, pluginChannel chan XcChangeDto) { ch := make(chan idxvpp.NameToIdxDto) xc.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/l3plugin/l3idx/l3_arp_index.go b/plugins/vpp/l3plugin/l3idx/l3_arp_index.go index a18356d9b4..763e47ff01 100644 --- a/plugins/vpp/l3plugin/l3idx/l3_arp_index.go +++ b/plugins/vpp/l3plugin/l3idx/l3_arp_index.go @@ -15,7 +15,6 @@ package l3idx import ( - "github.com/ligato/cn-infra/core" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/model/l3" @@ -36,7 +35,7 @@ type ARPIndex interface { LookupNamesByInterface(ifName string) []*l3.ArpTable_ArpEntry // WatchNameToIdx allows to subscribe for watching changes in SwIfIndex mapping - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan ARPIndexDto) + WatchNameToIdx(subscriber string, pluginChannel chan ARPIndexDto) } // ARPIndexRW is mapping between software ARP indexes (used internally in VPP) @@ -124,7 +123,7 @@ func (arpIndex *ArpIndex) Clear() { } // WatchNameToIdx allows to subscribe for watching changes in SwIfIndex mapping -func (arpIndex *ArpIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan ARPIndexDto) { +func (arpIndex *ArpIndex) WatchNameToIdx(subscriber string, pluginChannel chan ARPIndexDto) { ch := make(chan idxvpp.NameToIdxDto) arpIndex.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/l4plugin/nsidx/ns_index.go b/plugins/vpp/l4plugin/nsidx/ns_index.go index d215d0a6fe..ae8472f040 100644 --- a/plugins/vpp/l4plugin/nsidx/ns_index.go +++ b/plugins/vpp/l4plugin/nsidx/ns_index.go @@ -15,7 +15,6 @@ package nsidx import ( - "github.com/ligato/cn-infra/core" "github.com/ligato/vpp-agent/idxvpp" "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/model/l4" @@ -39,7 +38,7 @@ type AppNsIndex interface { ListNames() (names []string) // WatchNameToIdx allows to subscribe for watching changes in appNsIndex mapping - WatchNameToIdx(subscriber core.PluginName, pluginChannel chan ChangeDto) + WatchNameToIdx(subscriber string, pluginChannel chan ChangeDto) } // AppNsIndexRW is mapping between indexes (used internally in VPP) and AppNamespace indexes. @@ -131,7 +130,7 @@ func (appNs *appNsIndex) ListNames() (names []string) { } // WatchNameToIdx allows to subscribe for watching changes in appNsIndex mapping -func (appNs *appNsIndex) WatchNameToIdx(subscriber core.PluginName, pluginChannel chan ChangeDto) { +func (appNs *appNsIndex) WatchNameToIdx(subscriber string, pluginChannel chan ChangeDto) { ch := make(chan idxvpp.NameToIdxDto) appNs.mapping.Watch(subscriber, nametoidx.ToChan(ch)) go func() { diff --git a/plugins/vpp/options.go b/plugins/vpp/options.go new file mode 100644 index 0000000000..a2f1a12034 --- /dev/null +++ b/plugins/vpp/options.go @@ -0,0 +1,57 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vpp
+
+import (
+	"github.com/ligato/cn-infra/config"
+	"github.com/ligato/cn-infra/health/statuscheck"
+	"github.com/ligato/cn-infra/logging"
+	"github.com/ligato/cn-infra/servicelabel"
+)
+
+// DefaultPlugin is the default instance of Plugin
+//var DefaultPlugin = NewPlugin()
+
+// NewPlugin creates a new Plugin with the provided Options
+func NewPlugin(opts ...Option) *Plugin {
+	p := &Plugin{}
+
+	p.PluginName = "vpp"
+	p.StatusCheck = &statuscheck.DefaultPlugin
+	p.ServiceLabel = &servicelabel.DefaultPlugin
+
+	for _, o := range opts {
+		o(p)
+	}
+
+	if p.Deps.Log == nil {
+		p.Deps.Log = logging.ForPlugin(p.String())
+	}
+	if p.Deps.PluginConfig == nil {
+		p.Deps.PluginConfig = config.ForPlugin(p.String())
+	}
+
+	return p
+}
+
+// Option is a function that acts on a Plugin to inject Dependencies or configuration
+type Option func(*Plugin)
+
+// UseDeps returns Option that can inject custom dependencies.
+func UseDeps(cb func(*Deps)) Option {
+	return func(p *Plugin) {
+		cb(&p.Deps)
+	}
+}
diff --git a/plugins/vpp/plugin_impl_vpp.go b/plugins/vpp/plugin_impl_vpp.go
index 22cfdd1057..e24d42a24c 100644
--- a/plugins/vpp/plugin_impl_vpp.go
+++ b/plugins/vpp/plugin_impl_vpp.go
@@ -21,9 +21,11 @@ import (
 
 	govppapi "git.fd.io/govpp.git/api"
 	"github.com/ligato/cn-infra/datasync"
-	"github.com/ligato/cn-infra/flavors/local"
+	"github.com/ligato/cn-infra/health/statuscheck"
+	"github.com/ligato/cn-infra/infra"
 	"github.com/ligato/cn-infra/logging/measure"
 	"github.com/ligato/cn-infra/messaging"
+	"github.com/ligato/cn-infra/servicelabel"
 	"github.com/ligato/cn-infra/utils/safeclose"
 	"github.com/ligato/vpp-agent/idxvpp"
 	"github.com/ligato/vpp-agent/idxvpp/nametoidx"
@@ -142,15 +144,16 @@ type Plugin struct {
 // Deps groups injected dependencies of plugin so that they do not mix with
 // other plugin fields.
 type Deps struct {
-	// inject all below
-	local.PluginInfraDeps
+	infra.Deps
+	StatusCheck  statuscheck.PluginStatusWriter
+	ServiceLabel servicelabel.ReaderAPI
 
 	Publish           datasync.KeyProtoValWriter
 	PublishStatistics datasync.KeyProtoValWriter
 	Watch             datasync.KeyValProtoWatcher
 	IfStatePub        datasync.KeyProtoValWriter
 	GoVppmux          govppmux.API
-	Linux             linuxpluginAPI
+	Linux             LinuxpluginAPI
 	GRPCSvc           rpc.GRPCService
 
 	DataSyncs map[string]datasync.KeyProtoValWriter
@@ -165,7 +168,7 @@ type PluginConfig struct {
 	StatusPublishers []string `json:"status-publishers"`
 }
 
-type linuxpluginAPI interface {
+type LinuxpluginAPI interface {
 	// GetLinuxIfIndexes gives access to mapping of logical names (used in ETCD configuration) to corresponding Linux
 	// interface indexes. This mapping is especially helpful for plugins that need to watch for newly added or deleted
 	// Linux interfaces.
@@ -263,7 +266,7 @@ func (plugin *Plugin) GetIPSecSPDIndexes() ipsecidx.SPDIndex {
 
 // Init gets handlers for ETCD and Messaging and delegates them to ifConfigurator & ifStateUpdater.
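Taken together with the linux options.go above, plugins are now constructed through functional options rather than flavors. A minimal sketch of the intended wiring (this main function is an illustration only, not part of the patch; the agent's real startup, Init calls, and error handling are omitted):

package main

import (
	"log"

	"github.com/ligato/vpp-agent/plugins/linux"
	"github.com/ligato/vpp-agent/plugins/vpp"
)

func main() {
	// Build the VPP plugin with its defaults (PluginName "vpp",
	// default StatusCheck and ServiceLabel instances).
	vppPlugin := vpp.NewPlugin()

	// Build the Linux plugin and inject the VPP plugin dependency
	// through the new UseDeps option.
	linuxPlugin := linux.NewPlugin(linux.UseDeps(func(deps *linux.Deps) {
		deps.VPP = vppPlugin
	}))

	log.Printf("constructed plugins: %v, %v", vppPlugin, linuxPlugin)
}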
func (plugin *Plugin) Init() error { plugin.Log.Debug("Initializing default plugins") - flag.Parse() + //flag.Parse() // Read config file and set all related fields plugin.fromConfigFile() diff --git a/plugins/vpp/rpc/services.go b/plugins/vpp/rpc/services.go index 949dbee4d1..09dd02dea7 100644 --- a/plugins/vpp/rpc/services.go +++ b/plugins/vpp/rpc/services.go @@ -19,7 +19,7 @@ package rpc import ( "fmt" - "github.com/ligato/cn-infra/flavors/local" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/rpc/grpc" "github.com/ligato/vpp-agent/clientv1/linux" @@ -41,7 +41,7 @@ type GRPCSvcPlugin struct { // GRPCSvcPluginDeps - dependencies of GRPCSvcPlugin type GRPCSvcPluginDeps struct { - local.PluginLogDeps + infra.Deps GRPCServer grpc.Server } diff --git a/plugins/vpp/watch_events.go b/plugins/vpp/watch_events.go index 01e0c7447b..2a6da826ba 100644 --- a/plugins/vpp/watch_events.go +++ b/plugins/vpp/watch_events.go @@ -42,6 +42,7 @@ func (plugin *Plugin) watchEvents(ctx context.Context) { for { select { case e := <-plugin.resyncConfigChan: + plugin.Log.Warnf("VPP RESYNC") runWithMutex(func() { plugin.onResyncEvent(e) }) diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore deleted file mode 100644 index c6c482dca8..0000000000 --- a/vendor/github.com/Shopify/sarama/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -*.test - -# Folders -_obj -_test -.vagrant - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -coverage.txt diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml deleted file mode 100644 index ba1c0ab2dd..0000000000 --- a/vendor/github.com/Shopify/sarama/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go -go: -- 1.7.x -- 1.8.x -- 1.9.x - -env: - global: - - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 - - TOXIPROXY_ADDR=http://localhost:8474 - - KAFKA_INSTALL_ROOT=/home/travis/kafka - - KAFKA_HOSTNAME=localhost - - DEBUG=true - matrix: - - KAFKA_VERSION=0.10.2.1 - - KAFKA_VERSION=0.11.0.2 - - KAFKA_VERSION=1.0.0 - -before_install: -- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} -- vagrant/install_cluster.sh -- vagrant/boot_cluster.sh -- vagrant/create_topics.sh - -install: -- make install_dependencies - -script: -- make test -- make vet -- make errcheck -- make fmt - -after_success: - - bash <(curl -s https://codecov.io/bash) - -sudo: false diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index fcc9287237..0000000000 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,461 +0,0 @@ -# Changelog - -#### Version 1.15.0 (2017-12-08) - -New Features: - - Claim official support for Kafka 1.0, though it did already work - ([#984](https://github.com/Shopify/sarama/pull/984)). - - Helper methods for Kafka version numbers to/from strings - ([#989](https://github.com/Shopify/sarama/pull/989)). - - Implement CreatePartitions request/response - ([#985](https://github.com/Shopify/sarama/pull/985)). - -Improvements: - - Add error codes 45-60 - ([#986](https://github.com/Shopify/sarama/issues/986)). 
- -Bug Fixes: - - Fix slow consuming for certain Kafka 0.11/1.0 configurations - ([#982](https://github.com/Shopify/sarama/pull/982)). - - Correctly determine when a FetchResponse contains the new message format - ([#990](https://github.com/Shopify/sarama/pull/990)). - - Fix producing with multiple headers - ([#996](https://github.com/Shopify/sarama/pull/996)). - - Fix handling of truncated record batches - ([#998](https://github.com/Shopify/sarama/pull/998)). - - Fix leaking metrics when closing brokers - ([#991](https://github.com/Shopify/sarama/pull/991)). - -#### Version 1.14.0 (2017-11-13) - -New Features: - - Add support for the new Kafka 0.11 record-batch format, including the wire - protocol and the necessary behavioural changes in the producer and consumer. - Transactions and idempotency are not yet supported, but producing and - consuming should work with all the existing bells and whistles (batching, - compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta - of Arista Networks for this work. Part of - ([#901](https://github.com/Shopify/sarama/issues/901)). - -Bug Fixes: - - Fix encoding of ProduceResponse versions in test - ([#970](https://github.com/Shopify/sarama/pull/970)). - - Return partial replicas list when we have it - ([#975](https://github.com/Shopify/sarama/pull/975)). - -#### Version 1.13.0 (2017-10-04) - -New Features: - - Support for FetchRequest version 3 - ([#905](https://github.com/Shopify/sarama/pull/905)). - - Permit setting version on mock FetchResponses - ([#939](https://github.com/Shopify/sarama/pull/939)). - - Add a configuration option to support storing only minimal metadata for - extremely large clusters - ([#937](https://github.com/Shopify/sarama/pull/937)). - - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets - ([#932](https://github.com/Shopify/sarama/pull/932)). - -Improvements: - - Provide the block-level timestamp when consuming compressed messages - ([#885](https://github.com/Shopify/sarama/issues/885)). - - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned - by the broker, which can be meaningful - ([#930](https://github.com/Shopify/sarama/pull/930)). - - Use a `Ticker` to reduce consumer timer overhead at the cost of higher - variance in the actual timeout - ([#933](https://github.com/Shopify/sarama/pull/933)). - -Bug Fixes: - - Gracefully handle messages with negative timestamps - ([#907](https://github.com/Shopify/sarama/pull/907)). - - Raise a proper error when encountering an unknown message version - ([#940](https://github.com/Shopify/sarama/pull/940)). - -#### Version 1.12.0 (2017-05-08) - -New Features: - - Added support for the `ApiVersions` request and response pair, and Kafka - version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note - that you still need to specify the Kafka version in the Sarama configuration - for the time being. - - Added a `Brokers` method to the Client which returns the complete set of - active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). - - Added an `InSyncReplicas` method to the Client which returns the set of all - in-sync broker IDs for the given partition, now that the Kafka versions for - which this was misleading are no longer in our supported set - ([#872](https://github.com/Shopify/sarama/pull/872)). 
- - Added a `NewCustomHashPartitioner` method which allows constructing a hash - partitioner with a custom hash method in case the default (FNV-1a) is not - suitable - ([#837](https://github.com/Shopify/sarama/pull/837), - [#841](https://github.com/Shopify/sarama/pull/841)). - -Improvements: - - Recognize more Kafka error codes - ([#859](https://github.com/Shopify/sarama/pull/859)). - -Bug Fixes: - - Fix an issue where decoding a malformed FetchRequest would not return the - correct error ([#818](https://github.com/Shopify/sarama/pull/818)). - - Respect ordering of group protocols in JoinGroupRequests. This fix is - transparent if you're using the `AddGroupProtocol` or - `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from - the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` - ([#812](https://github.com/Shopify/sarama/issues/812)). - - Fix an alignment-related issue with atomics on 32-bit architectures - ([#859](https://github.com/Shopify/sarama/pull/859)). - -#### Version 1.11.0 (2016-12-20) - -_Important:_ As of Sarama 1.11 it is necessary to set the config value of -`Producer.Return.Successes` to true in order to use the SyncProducer. Previous -versions would silently override this value when instantiating a SyncProducer -which led to unexpected values and data races. - -New Features: - - Metrics! Thanks to Sébastien Launay for all his work on this feature - ([#701](https://github.com/Shopify/sarama/pull/701), - [#746](https://github.com/Shopify/sarama/pull/746), - [#766](https://github.com/Shopify/sarama/pull/766)). - - Add support for LZ4 compression - ([#786](https://github.com/Shopify/sarama/pull/786)). - - Add support for ListOffsetRequest v1 and Kafka 0.10.1 - ([#775](https://github.com/Shopify/sarama/pull/775)). - - Added a `HighWaterMarks` method to the Consumer which aggregates the - `HighWaterMarkOffset` values of its child topic/partitions - ([#769](https://github.com/Shopify/sarama/pull/769)). - -Bug Fixes: - - Fixed producing when using timestamps, compression and Kafka 0.10 - ([#759](https://github.com/Shopify/sarama/pull/759)). - - Added missing decoder methods to DescribeGroups response - ([#756](https://github.com/Shopify/sarama/pull/756)). - - Fix producer shutdown when `Return.Errors` is disabled - ([#787](https://github.com/Shopify/sarama/pull/787)). - - Don't mutate configuration in SyncProducer - ([#790](https://github.com/Shopify/sarama/pull/790)). - - Fix crash on SASL initialization failure - ([#795](https://github.com/Shopify/sarama/pull/795)). - -#### Version 1.10.1 (2016-08-30) - -Bug Fixes: - - Fix the documentation for `HashPartitioner` which was incorrect - ([#717](https://github.com/Shopify/sarama/pull/717)). - - Permit client creation even when it is limited by ACLs - ([#722](https://github.com/Shopify/sarama/pull/722)). - - Several fixes to the consumer timer optimization code, regressions introduced - in v1.10.0. Go's timers are finicky - ([#730](https://github.com/Shopify/sarama/pull/730), - [#733](https://github.com/Shopify/sarama/pull/733), - [#734](https://github.com/Shopify/sarama/pull/734)). - - Handle consuming compressed relative offsets with Kafka 0.10 - ([#735](https://github.com/Shopify/sarama/pull/735)). - -#### Version 1.10.0 (2016-08-02) - -_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of -Kafka you are running against (via the `config.Version` value) in order to use -features that may not be compatible with old Kafka versions. 
If you don't -specify this value it will default to 0.8.2 (the minimum supported), and trying -to use more recent features (like the offset manager) will fail with an error. - -_Also:_ The offset-manager's behaviour has been changed to match the upstream -java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and -[#713](https://github.com/Shopify/sarama/pull/713)). If you use the -offset-manager, please ensure that you are committing one *greater* than the -last consumed message offset or else you may end up consuming duplicate -messages. - -New Features: - - Support for Kafka 0.10 - ([#672](https://github.com/Shopify/sarama/pull/672), - [#678](https://github.com/Shopify/sarama/pull/678), - [#681](https://github.com/Shopify/sarama/pull/681), and others). - - Support for configuring the target Kafka version - ([#676](https://github.com/Shopify/sarama/pull/676)). - - Batch producing support in the SyncProducer - ([#677](https://github.com/Shopify/sarama/pull/677)). - - Extend producer mock to allow setting expectations on message contents - ([#667](https://github.com/Shopify/sarama/pull/667)). - -Improvements: - - Support `nil` compressed messages for deleting in compacted topics - ([#634](https://github.com/Shopify/sarama/pull/634)). - - Pre-allocate decoding errors, greatly reducing heap usage and GC time against - misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). - - Re-use consumer expiry timers, removing one allocation per consumed message - ([#707](https://github.com/Shopify/sarama/pull/707)). - -Bug Fixes: - - Actually default the client ID to "sarama" like we say we do - ([#664](https://github.com/Shopify/sarama/pull/664)). - - Fix a rare issue where `Client.Leader` could return the wrong error - ([#685](https://github.com/Shopify/sarama/pull/685)). - - Fix a possible tight loop in the consumer - ([#693](https://github.com/Shopify/sarama/pull/693)). - - Match upstream's offset-tracking behaviour - ([#705](https://github.com/Shopify/sarama/pull/705)). - - Report UnknownTopicOrPartition errors from the offset manager - ([#706](https://github.com/Shopify/sarama/pull/706)). - - Fix possible negative partition value from the HashPartitioner - ([#709](https://github.com/Shopify/sarama/pull/709)). - -#### Version 1.9.0 (2016-05-16) - -New Features: - - Add support for custom offset manager retention durations - ([#602](https://github.com/Shopify/sarama/pull/602)). - - Publish low-level mocks to enable testing of third-party producer/consumer - implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - - Declare support for Golang 1.6 - ([#611](https://github.com/Shopify/sarama/pull/611)). - - Support for SASL plain-text auth - ([#648](https://github.com/Shopify/sarama/pull/648)). - -Improvements: - - Simplified broker locking scheme slightly - ([#604](https://github.com/Shopify/sarama/pull/604)). - - Documentation cleanup - ([#605](https://github.com/Shopify/sarama/pull/605), - [#621](https://github.com/Shopify/sarama/pull/621), - [#654](https://github.com/Shopify/sarama/pull/654)). - -Bug Fixes: - - Fix race condition shutting down the OffsetManager - ([#658](https://github.com/Shopify/sarama/pull/658)). - -#### Version 1.8.0 (2016-02-01) - -New Features: - - Full support for Kafka 0.9: - - All protocol messages and fields - ([#586](https://github.com/Shopify/sarama/pull/586), - [#588](https://github.com/Shopify/sarama/pull/588), - [#590](https://github.com/Shopify/sarama/pull/590)). 
- - Verified that TLS support works - ([#581](https://github.com/Shopify/sarama/pull/581)). - - Fixed the OffsetManager compatibility - ([#585](https://github.com/Shopify/sarama/pull/585)). - -Improvements: - - Optimize for fewer system calls when reading from the network - ([#584](https://github.com/Shopify/sarama/pull/584)). - - Automatically retry `InvalidMessage` errors to match upstream behaviour - ([#589](https://github.com/Shopify/sarama/pull/589)). - -#### Version 1.7.0 (2015-12-11) - -New Features: - - Preliminary support for Kafka 0.9 - ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several - caveats: - - Protocol-layer support is mostly in place - ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 - renamed some messages and fields, which we did not in order to preserve API - compatibility. - - The producer and consumer work against 0.9, but the offset manager does - not ([#573](https://github.com/Shopify/sarama/pull/573)). - - TLS support may or may not work - ([#581](https://github.com/Shopify/sarama/pull/581)). - -Improvements: - - Don't wait for request timeouts on dead brokers, greatly speeding recovery - when the TCP connection is left hanging - ([#548](https://github.com/Shopify/sarama/pull/548)). - - Refactored part of the producer. The new version provides a much more elegant - solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also - slightly more efficient, and much more precise in calculating batch sizes - when compression is used - ([#549](https://github.com/Shopify/sarama/pull/549), - [#550](https://github.com/Shopify/sarama/pull/550), - [#551](https://github.com/Shopify/sarama/pull/551)). - -Bug Fixes: - - Fix race condition in consumer test mock - ([#553](https://github.com/Shopify/sarama/pull/553)). - -#### Version 1.6.1 (2015-09-25) - -Bug Fixes: - - Fix panic that could occur if a user-supplied message value failed to encode - ([#449](https://github.com/Shopify/sarama/pull/449)). - -#### Version 1.6.0 (2015-09-04) - -New Features: - - Implementation of a consumer offset manager using the APIs introduced in - Kafka 0.8.2. The API is designed mainly for integration into a future - high-level consumer, not for direct use, although it is *possible* to use it - directly. - ([#461](https://github.com/Shopify/sarama/pull/461)). - -Improvements: - - CRC32 calculation is much faster on machines with SSE4.2 instructions, - removing a major hotspot from most profiles - ([#255](https://github.com/Shopify/sarama/pull/255)). - -Bug Fixes: - - Make protocol decoding more robust against some malformed packets generated - by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), - [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways - ([#528](https://github.com/Shopify/sarama/pull/528)). - - Fix a potential race condition panic in the consumer on shutdown - ([#529](https://github.com/Shopify/sarama/pull/529)). - -#### Version 1.5.0 (2015-08-17) - -New Features: - - TLS-encrypted network connections are now supported. This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). - -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). 
- - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -#### Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). - - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -#### Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -#### Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -#### Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). - -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). - -#### Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). 
- This adds two methods to the client API: - - `Coordinator(consumerGroup string) (*Broker, error)` - - `RefreshCoordinator(consumerGroup string) error` - -Improvements: - - ConsumerMetadataResponses now automatically create a Broker object out of the - ID/address/port combination for the Coordinator; accessing the fields - individually has been deprecated - ([#413](https://github.com/Shopify/sarama/pull/413)). - - Much improved handling of `OffsetOutOfRange` errors in the consumer. - Consumers will fail to start if the provided offset is out of range - ([#418](https://github.com/Shopify/sarama/pull/418)) - and they will automatically shut down if the offset falls out of range - ([#424](https://github.com/Shopify/sarama/pull/424)). - - Small performance improvement in encoding and decoding protocol messages - ([#427](https://github.com/Shopify/sarama/pull/427)). - -Bug Fixes: - - Fix a rare race condition in the client's background metadata refresher if - it happens to be activated while the client is being closed - ([#422](https://github.com/Shopify/sarama/pull/422)). - -#### Version 1.2.0 (2015-04-07) - -Improvements: - - The producer's behaviour when `Flush.Frequency` is set is now more intuitive - ([#389](https://github.com/Shopify/sarama/pull/389)). - - The producer is now somewhat more memory-efficient during and after retrying - messages due to an improved queue implementation - ([#396](https://github.com/Shopify/sarama/pull/396)). - - The consumer produces much more useful logging output when leadership - changes ([#385](https://github.com/Shopify/sarama/pull/385)). - - The client's `GetOffset` method will now automatically refresh metadata and - retry once in the event of stale information or similar - ([#394](https://github.com/Shopify/sarama/pull/394)). - - Broker connections now have support for using TCP keepalives - ([#407](https://github.com/Shopify/sarama/issues/407)). - -Bug Fixes: - - The OffsetCommitRequest message now correctly implements all three possible - API versions ([#390](https://github.com/Shopify/sarama/pull/390), - [#400](https://github.com/Shopify/sarama/pull/400)). - -#### Version 1.1.0 (2015-03-20) - -Improvements: - - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly - broken topics don't choke throughput - ([#373](https://github.com/Shopify/sarama/pull/373)). - -Bug Fixes: - - Fix the producer's internal reference counting in certain unusual scenarios - ([#367](https://github.com/Shopify/sarama/pull/367)). - - Fix the consumer's internal reference counting in certain unusual scenarios - ([#369](https://github.com/Shopify/sarama/pull/369)). - - Fix a condition where the producer's internal control messages could have - gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). - - Fix an issue where invalid partition lists would be cached when asking for - metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). - - -#### Version 1.0.0 (2015-03-17) - -Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - -- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. -- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. 
-- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. -- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. -- All the configuration values have been unified in the `Config` struct. -- Much improved test suite. diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE deleted file mode 100644 index 8121b63b1c..0000000000 --- a/vendor/github.com/Shopify/sarama/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile deleted file mode 100644 index 58a39e4f34..0000000000 --- a/vendor/github.com/Shopify/sarama/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -default: fmt vet errcheck test - -# Taken from https://github.com/codecov/example-go#caveat-multiple-files -test: - echo "" > coverage.txt - for d in `go list ./... | grep -v vendor`; do \ - go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \ - if [ -f profile.out ]; then \ - cat profile.out >> coverage.txt; \ - rm profile.out; \ - fi \ - done - -vet: - go vet ./... - -errcheck: - errcheck github.com/Shopify/sarama/... - -fmt: - @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi - -install_dependencies: install_errcheck get - -install_errcheck: - go get github.com/kisielk/errcheck - -get: - go get -t diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md deleted file mode 100644 index 25eb0cf926..0000000000 --- a/vendor/github.com/Shopify/sarama/README.md +++ /dev/null @@ -1,39 +0,0 @@ -sarama -====== - -[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) -[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) - -Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). - -### Getting started - -- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). -- Mocks for testing are available in the [mocks](./mocks) subpackage. 
-- The [examples](./examples) directory contains more elaborate example applications. -- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. - -You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). - -### Compatibility and API stability - -Sarama provides a "2 releases + 2 months" compatibility guarantee: we support -the two latest stable releases of Kafka and Go, and we provide a two month -grace period for older releases. This means we currently officially support -Go 1.9 through 1.7, and Kafka 1.0 through 0.10, although older releases are -still likely to work. - -Sarama follows semantic versioning and provides API stability via the gopkg.in service. -You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. -A changelog is available [here](CHANGELOG.md). - -### Contributing - -* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). -* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more - technical and design details. -* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) - contains a wealth of useful information. -* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. -* If you have any questions, just ask! diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile deleted file mode 100644 index f4b848a301..0000000000 --- a/vendor/github.com/Shopify/sarama/Vagrantfile +++ /dev/null @@ -1,20 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
-VAGRANTFILE_API_VERSION = "2" - -# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB -MEMORY = 3072 - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "ubuntu/trusty64" - - config.vm.provision :shell, path: "vagrant/provision.sh" - - config.vm.network "private_network", ip: "192.168.100.67" - - config.vm.provider "virtualbox" do |v| - v.memory = MEMORY - end -end diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go deleted file mode 100644 index ab65f01ccf..0000000000 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -type ApiVersionsRequest struct { -} - -func (r *ApiVersionsRequest) encode(pe packetEncoder) error { - return nil -} - -func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { - return nil -} - -func (r *ApiVersionsRequest) key() int16 { - return 18 -} - -func (r *ApiVersionsRequest) version() int16 { - return 0 -} - -func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go deleted file mode 100644 index 23bc326e15..0000000000 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ /dev/null @@ -1,87 +0,0 @@ -package sarama - -type ApiVersionsResponseBlock struct { - ApiKey int16 - MinVersion int16 - MaxVersion int16 -} - -func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { - pe.putInt16(b.ApiKey) - pe.putInt16(b.MinVersion) - pe.putInt16(b.MaxVersion) - return nil -} - -func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { - var err error - - if b.ApiKey, err = pd.getInt16(); err != nil { - return err - } - - if b.MinVersion, err = pd.getInt16(); err != nil { - return err - } - - if b.MaxVersion, err = pd.getInt16(); err != nil { - return err - } - - return nil -} - -type ApiVersionsResponse struct { - Err KError - ApiVersions []*ApiVersionsResponseBlock -} - -func (r *ApiVersionsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { - return err - } - for _, apiVersion := range r.ApiVersions { - if err := apiVersion.encode(pe); err != nil { - return err - } - } - return nil -} - -func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - r.Err = KError(kerr) - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) - for i := 0; i < numBlocks; i++ { - block := new(ApiVersionsResponseBlock) - if err := block.decode(pd); err != nil { - return err - } - r.ApiVersions[i] = block - } - - return nil -} - -func (r *ApiVersionsResponse) key() int16 { - return 18 -} - -func (r *ApiVersionsResponse) version() int16 { - return 0 -} - -func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go deleted file mode 100644 index 1eff81cbf6..0000000000 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ /dev/null @@ -1,921 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "fmt" - "sync" - "time" - - "github.com/eapache/go-resiliency/breaker" - "github.com/eapache/queue" -) - -// 
AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages -// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, -// and parses responses for errors. You must read from the Errors() channel or the -// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid -// leaks: it will not be garbage-collected automatically when it passes out of -// scope. -type AsyncProducer interface { - - // AsyncClose triggers a shutdown of the producer. The shutdown has completed - // when both the Errors and Successes channels have been closed. When calling - // AsyncClose, you *must* continue to read from those channels in order to - // drain the results of any messages in flight. - AsyncClose() - - // Close shuts down the producer and waits for any buffered messages to be - // flushed. You must call this function before a producer object passes out of - // scope, as it may otherwise leak memory. You must call this before calling - // Close on the underlying client. - Close() error - - // Input is the input channel for the user to write messages to that they - // wish to send. - Input() chan<- *ProducerMessage - - // Successes is the success output channel back to the user when Return.Successes is - // enabled. If Return.Successes is true, you MUST read from this channel or the - // Producer will deadlock. It is suggested that you send and read messages - // together in a single select statement. - Successes() <-chan *ProducerMessage - - // Errors is the error output channel back to the user. You MUST read from this - // channel or the Producer will deadlock when the channel is full. Alternatively, - // you can set Producer.Return.Errors in your config to false, which prevents - // errors to be returned. - Errors() <-chan *ProducerError -} - -type asyncProducer struct { - client Client - conf *Config - ownClient bool - - errors chan *ProducerError - input, successes, retries chan *ProducerMessage - inFlight sync.WaitGroup - - brokers map[*Broker]chan<- *ProducerMessage - brokerRefs map[chan<- *ProducerMessage]int - brokerLock sync.Mutex -} - -// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. -func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { - client, err := NewClient(addrs, conf) - if err != nil { - return nil, err - } - - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - p.(*asyncProducer).ownClient = true - return p, nil -} - -// NewAsyncProducerFromClient creates a new Producer using the given client. It is still -// necessary to call Close() on the underlying client when shutting down this producer. 
-func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { - // Check that we are not dealing with a closed Client before processing any other arguments - if client.Closed() { - return nil, ErrClosedClient - } - - p := &asyncProducer{ - client: client, - conf: client.Config(), - errors: make(chan *ProducerError), - input: make(chan *ProducerMessage), - successes: make(chan *ProducerMessage), - retries: make(chan *ProducerMessage), - brokers: make(map[*Broker]chan<- *ProducerMessage), - brokerRefs: make(map[chan<- *ProducerMessage]int), - } - - // launch our singleton dispatchers - go withRecover(p.dispatcher) - go withRecover(p.retryHandler) - - return p, nil -} - -type flagSet int8 - -const ( - syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer - fin // final message from partitionProducer to brokerProducer and back - shutdown // start the shutdown process -) - -// ProducerMessage is the collection of elements passed to the Producer in order to send a message. -type ProducerMessage struct { - Topic string // The Kafka topic for this message. - // The partitioning key for this message. Pre-existing Encoders include - // StringEncoder and ByteEncoder. - Key Encoder - // The actual message to store in Kafka. Pre-existing Encoders include - // StringEncoder and ByteEncoder. - Value Encoder - - // The headers are key-value pairs that are transparently passed - // by Kafka between producers and consumers. - Headers []RecordHeader - - // This field is used to hold arbitrary data you wish to include so it - // will be available when receiving on the Successes and Errors channels. - // Sarama completely ignores this field and is only to be used for - // pass-through data. - Metadata interface{} - - // Below this point are filled in by the producer as the message is processed - - // Offset is the offset of the message stored on the broker. This is only - // guaranteed to be defined if the message was successfully delivered and - // RequiredAcks is not NoResponse. - Offset int64 - // Partition is the partition that the message was sent to. This is only - // guaranteed to be defined if the message was successfully delivered. - Partition int32 - // Timestamp is the timestamp assigned to the message by the broker. This - // is only guaranteed to be defined if the message was successfully - // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at - // least version 0.10.0. - Timestamp time.Time - - retries int - flags flagSet -} - -const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. - -func (m *ProducerMessage) byteSize(version int) int { - var size int - if version >= 2 { - size = maximumRecordOverhead - for _, h := range m.Headers { - size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 - } - } else { - size = producerMessageOverhead - } - if m.Key != nil { - size += m.Key.Length() - } - if m.Value != nil { - size += m.Value.Length() - } - return size -} - -func (m *ProducerMessage) clear() { - m.flags = 0 - m.retries = 0 -} - -// ProducerError is the type of error generated when the producer fails to deliver a message. -// It contains the original ProducerMessage as well as the actual error value. -type ProducerError struct { - Msg *ProducerMessage - Err error -} - -func (pe ProducerError) Error() string { - return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) -} - -// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. 
-// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel -// when closing a producer. -type ProducerErrors []*ProducerError - -func (pe ProducerErrors) Error() string { - return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) -} - -func (p *asyncProducer) Errors() <-chan *ProducerError { - return p.errors -} - -func (p *asyncProducer) Successes() <-chan *ProducerMessage { - return p.successes -} - -func (p *asyncProducer) Input() chan<- *ProducerMessage { - return p.input -} - -func (p *asyncProducer) Close() error { - p.AsyncClose() - - if p.conf.Producer.Return.Successes { - go withRecover(func() { - for range p.successes { - } - }) - } - - var errors ProducerErrors - if p.conf.Producer.Return.Errors { - for event := range p.errors { - errors = append(errors, event) - } - } else { - <-p.errors - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (p *asyncProducer) AsyncClose() { - go withRecover(p.shutdown) -} - -// singleton -// dispatches messages by topic -func (p *asyncProducer) dispatcher() { - handlers := make(map[string]chan<- *ProducerMessage) - shuttingDown := false - - for msg := range p.input { - if msg == nil { - Logger.Println("Something tried to send a nil message, it was ignored.") - continue - } - - if msg.flags&shutdown != 0 { - shuttingDown = true - p.inFlight.Done() - continue - } else if msg.retries == 0 { - if shuttingDown { - // we can't just call returnError here because that decrements the wait group, - // which hasn't been incremented yet for this message, and shouldn't be - pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} - if p.conf.Producer.Return.Errors { - p.errors <- pErr - } else { - Logger.Println(pErr) - } - continue - } - p.inFlight.Add(1) - } - - version := 1 - if p.conf.Version.IsAtLeast(V0_11_0_0) { - version = 2 - } - if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { - p.returnError(msg, ErrMessageSizeTooLarge) - continue - } - - handler := handlers[msg.Topic] - if handler == nil { - handler = p.newTopicProducer(msg.Topic) - handlers[msg.Topic] = handler - } - - handler <- msg - } - - for _, handler := range handlers { - close(handler) - } -} - -// one per topic -// partitions messages, then dispatches them by partition -type topicProducer struct { - parent *asyncProducer - topic string - input <-chan *ProducerMessage - - breaker *breaker.Breaker - handlers map[int32]chan<- *ProducerMessage - partitioner Partitioner -} - -func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { - input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) - tp := &topicProducer{ - parent: p, - topic: topic, - input: input, - breaker: breaker.New(3, 1, 10*time.Second), - handlers: make(map[int32]chan<- *ProducerMessage), - partitioner: p.conf.Producer.Partitioner(topic), - } - go withRecover(tp.dispatch) - return input -} - -func (tp *topicProducer) dispatch() { - for msg := range tp.input { - if msg.retries == 0 { - if err := tp.partitionMessage(msg); err != nil { - tp.parent.returnError(msg, err) - continue - } - } - - handler := tp.handlers[msg.Partition] - if handler == nil { - handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) - tp.handlers[msg.Partition] = handler - } - - handler <- msg - } - - for _, handler := range tp.handlers { - close(handler) - } -} - -func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { - var partitions []int32 - - err := tp.breaker.Run(func() (err error) { - if 
tp.partitioner.RequiresConsistency() { - partitions, err = tp.parent.client.Partitions(msg.Topic) - } else { - partitions, err = tp.parent.client.WritablePartitions(msg.Topic) - } - return - }) - - if err != nil { - return err - } - - numPartitions := int32(len(partitions)) - - if numPartitions == 0 { - return ErrLeaderNotAvailable - } - - choice, err := tp.partitioner.Partition(msg, numPartitions) - - if err != nil { - return err - } else if choice < 0 || choice >= numPartitions { - return ErrInvalidPartition - } - - msg.Partition = partitions[choice] - - return nil -} - -// one per partition per topic -// dispatches messages to the appropriate broker -// also responsible for maintaining message order during retries -type partitionProducer struct { - parent *asyncProducer - topic string - partition int32 - input <-chan *ProducerMessage - - leader *Broker - breaker *breaker.Breaker - output chan<- *ProducerMessage - - // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, - // all other messages get buffered in retryState[msg.retries].buf to preserve ordering - // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and - // therefore whether our buffer is complete and safe to flush) - highWatermark int - retryState []partitionRetryState -} - -type partitionRetryState struct { - buf []*ProducerMessage - expectChaser bool -} - -func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { - input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) - pp := &partitionProducer{ - parent: p, - topic: topic, - partition: partition, - input: input, - - breaker: breaker.New(3, 1, 10*time.Second), - retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), - } - go withRecover(pp.dispatch) - return input -} - -func (pp *partitionProducer) dispatch() { - // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` - // on the first message - pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) - if pp.leader != nil { - pp.output = pp.parent.getBrokerProducer(pp.leader) - pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} - } - - for msg := range pp.input { - if msg.retries > pp.highWatermark { - // a new, higher, retry level; handle it and then back off - pp.newHighWatermark(msg.retries) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) - } else if pp.highWatermark > 0 { - // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level - if msg.retries < pp.highWatermark { - // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) - if msg.flags&fin == fin { - pp.retryState[msg.retries].expectChaser = false - pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected - } else { - pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) - } - continue - } else if msg.flags&fin == fin { - // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, - // meaning this retry level is done and we can go down (at least) one level and flush that - pp.retryState[pp.highWatermark].expectChaser = false - pp.flushRetryBuffers() - pp.parent.inFlight.Done() // this fin is now 
handled and will be garbage collected - continue - } - } - - // if we made it this far then the current msg contains real data, and can be sent to the next goroutine - // without breaking any of our ordering guarantees - - if pp.output == nil { - if err := pp.updateLeader(); err != nil { - pp.parent.returnError(msg, err) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) - continue - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - } - - pp.output <- msg - } - - if pp.output != nil { - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - } -} - -func (pp *partitionProducer) newHighWatermark(hwm int) { - Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) - pp.highWatermark = hwm - - // send off a fin so that we know when everything "in between" has made it - // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) - pp.retryState[pp.highWatermark].expectChaser = true - pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} - - // a new HWM means that our current broker selection is out of date - Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - pp.output = nil -} - -func (pp *partitionProducer) flushRetryBuffers() { - Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) - for { - pp.highWatermark-- - - if pp.output == nil { - if err := pp.updateLeader(); err != nil { - pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) - goto flushDone - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - } - - for _, msg := range pp.retryState[pp.highWatermark].buf { - pp.output <- msg - } - - flushDone: - pp.retryState[pp.highWatermark].buf = nil - if pp.retryState[pp.highWatermark].expectChaser { - Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) - break - } else if pp.highWatermark == 0 { - Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) - break - } - } -} - -func (pp *partitionProducer) updateLeader() error { - return pp.breaker.Run(func() (err error) { - if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { - return err - } - - if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { - return err - } - - pp.output = pp.parent.getBrokerProducer(pp.leader) - pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} - - return nil - }) -} - -// one per broker; also constructs an associated flusher -func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { - var ( - input = make(chan *ProducerMessage) - bridge = make(chan *produceSet) - responses = make(chan *brokerProducerResponse) - ) - - bp := &brokerProducer{ - parent: p, - broker: broker, - input: input, - output: bridge, - responses: responses, - buffer: newProduceSet(p), - currentRetries: make(map[string]map[int32]error), - } - go withRecover(bp.run) - - // minimal bridge to make the network 
response `select`able - go withRecover(func() { - for set := range bridge { - request := set.buildRequest() - - response, err := broker.Produce(request) - - responses <- &brokerProducerResponse{ - set: set, - err: err, - res: response, - } - } - close(responses) - }) - - return input -} - -type brokerProducerResponse struct { - set *produceSet - err error - res *ProduceResponse -} - -// groups messages together into appropriately-sized batches for sending to the broker -// handles state related to retries etc -type brokerProducer struct { - parent *asyncProducer - broker *Broker - - input <-chan *ProducerMessage - output chan<- *produceSet - responses <-chan *brokerProducerResponse - - buffer *produceSet - timer <-chan time.Time - timerFired bool - - closing error - currentRetries map[string]map[int32]error -} - -func (bp *brokerProducer) run() { - var output chan<- *produceSet - Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) - - for { - select { - case msg := <-bp.input: - if msg == nil { - bp.shutdown() - return - } - - if msg.flags&syn == syn { - Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", - bp.broker.ID(), msg.Topic, msg.Partition) - if bp.currentRetries[msg.Topic] == nil { - bp.currentRetries[msg.Topic] = make(map[int32]error) - } - bp.currentRetries[msg.Topic][msg.Partition] = nil - bp.parent.inFlight.Done() - continue - } - - if reason := bp.needsRetry(msg); reason != nil { - bp.parent.retryMessage(msg, reason) - - if bp.closing == nil && msg.flags&fin == fin { - // we were retrying this partition but we can start processing again - delete(bp.currentRetries[msg.Topic], msg.Partition) - Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", - bp.broker.ID(), msg.Topic, msg.Partition) - } - - continue - } - - if bp.buffer.wouldOverflow(msg) { - if err := bp.waitForSpace(msg); err != nil { - bp.parent.retryMessage(msg, err) - continue - } - } - - if err := bp.buffer.add(msg); err != nil { - bp.parent.returnError(msg, err) - continue - } - - if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { - bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) - } - case <-bp.timer: - bp.timerFired = true - case output <- bp.buffer: - bp.rollOver() - case response := <-bp.responses: - bp.handleResponse(response) - } - - if bp.timerFired || bp.buffer.readyToFlush() { - output = bp.output - } else { - output = nil - } - } -} - -func (bp *brokerProducer) shutdown() { - for !bp.buffer.empty() { - select { - case response := <-bp.responses: - bp.handleResponse(response) - case bp.output <- bp.buffer: - bp.rollOver() - } - } - close(bp.output) - for response := range bp.responses { - bp.handleResponse(response) - } - - Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) -} - -func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { - if bp.closing != nil { - return bp.closing - } - - return bp.currentRetries[msg.Topic][msg.Partition] -} - -func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { - Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) - - for { - select { - case response := <-bp.responses: - bp.handleResponse(response) - // handling a response can change our state, so re-check some things - if reason := bp.needsRetry(msg); reason != nil { - return reason - } else if !bp.buffer.wouldOverflow(msg) { - return nil - } - case bp.output <- bp.buffer: - bp.rollOver() - return nil - } - } -} - -func (bp *brokerProducer) rollOver() { 
- bp.timer = nil - bp.timerFired = false - bp.buffer = newProduceSet(bp.parent) -} - -func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { - if response.err != nil { - bp.handleError(response.set, response.err) - } else { - bp.handleSuccess(response.set, response.res) - } - - if bp.buffer.empty() { - bp.rollOver() // this can happen if the response invalidated our buffer - } -} - -func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { - // we iterate through the blocks in the request set, not the response, so that we notice - // if the response is missing a block completely - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - if response == nil { - // this only happens when RequiredAcks is NoResponse, so we have to assume success - bp.parent.returnSuccesses(msgs) - return - } - - block := response.GetBlock(topic, partition) - if block == nil { - bp.parent.returnErrors(msgs, ErrIncompleteResponse) - return - } - - switch block.Err { - // Success - case ErrNoError: - if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { - for _, msg := range msgs { - msg.Timestamp = block.Timestamp - } - } - for i, msg := range msgs { - msg.Offset = block.Offset + int64(i) - } - bp.parent.returnSuccesses(msgs) - // Retriable errors - case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: - Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", - bp.broker.ID(), topic, partition, block.Err) - bp.currentRetries[topic][partition] = block.Err - bp.parent.retryMessages(msgs, block.Err) - bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) - // Other non-retriable errors - default: - bp.parent.returnErrors(msgs, block.Err) - } - }) -} - -func (bp *brokerProducer) handleError(sent *produceSet, err error) { - switch err.(type) { - case PacketEncodingError: - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.returnErrors(msgs, err) - }) - default: - Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) - bp.parent.abandonBrokerConnection(bp.broker) - _ = bp.broker.Close() - bp.closing = err - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) - }) - bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) - }) - bp.rollOver() - } -} - -// singleton -// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock -// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel -func (p *asyncProducer) retryHandler() { - var msg *ProducerMessage - buf := queue.New() - - for { - if buf.Length() == 0 { - msg = <-p.retries - } else { - select { - case msg = <-p.retries: - case p.input <- buf.Peek().(*ProducerMessage): - buf.Remove() - continue - } - } - - if msg == nil { - return - } - - buf.Add(msg) - } -} - -// utility functions - -func (p *asyncProducer) shutdown() { - Logger.Println("Producer shutting down.") - p.inFlight.Add(1) - p.input <- &ProducerMessage{flags: shutdown} - - p.inFlight.Wait() - - if p.ownClient { - err := p.client.Close() - if err != nil { - Logger.Println("producer/shutdown failed to close the embedded client:", err) - } - } - - close(p.input) - 
close(p.retries) - close(p.errors) - close(p.successes) -} - -func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { - msg.clear() - pErr := &ProducerError{Msg: msg, Err: err} - if p.conf.Producer.Return.Errors { - p.errors <- pErr - } else { - Logger.Println(pErr) - } - p.inFlight.Done() -} - -func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { - for _, msg := range batch { - p.returnError(msg, err) - } -} - -func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { - for _, msg := range batch { - if p.conf.Producer.Return.Successes { - msg.clear() - p.successes <- msg - } - p.inFlight.Done() - } -} - -func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { - if msg.retries >= p.conf.Producer.Retry.Max { - p.returnError(msg, err) - } else { - msg.retries++ - p.retries <- msg - } -} - -func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { - for _, msg := range batch { - p.retryMessage(msg, err) - } -} - -func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - bp := p.brokers[broker] - - if bp == nil { - bp = p.newBrokerProducer(broker) - p.brokers[broker] = bp - p.brokerRefs[bp] = 0 - } - - p.brokerRefs[bp]++ - - return bp -} - -func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - p.brokerRefs[bp]-- - if p.brokerRefs[bp] == 0 { - close(bp) - delete(p.brokerRefs, bp) - - if p.brokers[broker] == bp { - delete(p.brokers, broker) - } - } -} - -func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - delete(p.brokers, broker) -} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go deleted file mode 100644 index 923b07faf2..0000000000 --- a/vendor/github.com/Shopify/sarama/broker.go +++ /dev/null @@ -1,692 +0,0 @@ -package sarama - -import ( - "crypto/tls" - "encoding/binary" - "fmt" - "io" - "net" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/rcrowley/go-metrics" -) - -// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. -type Broker struct { - id int32 - addr string - - conf *Config - correlationID int32 - conn net.Conn - connErr error - lock sync.Mutex - opened int32 - - responses chan responsePromise - done chan bool - - incomingByteRate metrics.Meter - requestRate metrics.Meter - requestSize metrics.Histogram - requestLatency metrics.Histogram - outgoingByteRate metrics.Meter - responseRate metrics.Meter - responseSize metrics.Histogram - brokerIncomingByteRate metrics.Meter - brokerRequestRate metrics.Meter - brokerRequestSize metrics.Histogram - brokerRequestLatency metrics.Histogram - brokerOutgoingByteRate metrics.Meter - brokerResponseRate metrics.Meter - brokerResponseSize metrics.Histogram -} - -type responsePromise struct { - requestTime time.Time - correlationID int32 - packets chan []byte - errors chan error -} - -// NewBroker creates and returns a Broker targeting the given host:port address. -// This does not attempt to actually connect, you have to call Open() for that. -func NewBroker(addr string) *Broker { - return &Broker{id: -1, addr: addr} -} - -// Open tries to connect to the Broker if it is not already connected or connecting, but does not block -// waiting for the connection to complete. 
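The retry plumbing above (returnError, retryMessage and friends) is bounded entirely by configuration: retryMessage gives up once msg.retries exceeds Producer.Retry.Max. A sketch of the knobs involved, with illustrative values only:

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func producerConfig() *sarama.Config {
	conf := sarama.NewConfig()
	conf.Producer.Retry.Max = 5                           // per-message budget checked by retryMessage
	conf.Producer.Retry.Backoff = 250 * time.Millisecond  // pause between retry levels
	conf.Producer.Flush.Frequency = 50 * time.Millisecond // batching window armed in brokerProducer.run
	conf.Producer.MaxMessageBytes = 1 << 20               // oversized messages fail fast in dispatcher()
	return conf
}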
This means that any subsequent operations on the broker will -// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, -// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or -// AlreadyConnected. If conf is nil, the result of NewConfig() is used. -func (b *Broker) Open(conf *Config) error { - if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { - return ErrAlreadyConnected - } - - if conf == nil { - conf = NewConfig() - } - - err := conf.Validate() - if err != nil { - return err - } - - b.lock.Lock() - - go withRecover(func() { - defer b.lock.Unlock() - - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - } - - if conf.Net.TLS.Enable { - b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) - } else { - b.conn, b.connErr = dialer.Dial("tcp", b.addr) - } - if b.connErr != nil { - Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) - b.conn = nil - atomic.StoreInt32(&b.opened, 0) - return - } - b.conn = newBufConn(b.conn) - - b.conf = conf - - // Create or reuse the global metrics shared between brokers - b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) - b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) - b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) - b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) - b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) - b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) - b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) - // Do not gather metrics for seeded broker (only used during bootstrap) because they share - // the same id (-1) and are already exposed through the global metrics above - if b.id >= 0 { - b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) - b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) - b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) - b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) - b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) - b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) - b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) - } - - if conf.Net.SASL.Enable { - b.connErr = b.sendAndReceiveSASLPlainAuth() - if b.connErr != nil { - err = b.conn.Close() - if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) - } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) - } - b.conn = nil - atomic.StoreInt32(&b.opened, 0) - return - } - } - - b.done = make(chan bool) - b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) - - if b.id >= 0 { - Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) - } else { - Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) - } - go withRecover(b.responseReceiver) - }) - - return nil -} - -// Connected returns true if the broker is connected and false otherwise. 
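As the Open comment above notes, the dial runs on a background goroutine while the broker lock is held, so a follow-up call to Connected() doubles as a synchronous barrier. A sketch of that pattern (the address is a placeholder):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	b := sarama.NewBroker("kafka-1:9092") // placeholder address
	if err := b.Open(nil); err != nil {   // nil conf falls back to NewConfig(), per Open above
		log.Fatal(err) // only ConfigurationError or ErrAlreadyConnected surface here
	}
	if ok, err := b.Connected(); !ok { // blocks until the background dial resolves
		log.Fatalf("broker not connected: %v", err)
	}
	defer b.Close()
}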
If the broker is not -// connected but it had tried to connect, the error from that connection attempt is also returned. -func (b *Broker) Connected() (bool, error) { - b.lock.Lock() - defer b.lock.Unlock() - - return b.conn != nil, b.connErr -} - -func (b *Broker) Close() error { - b.lock.Lock() - defer b.lock.Unlock() - - if b.conn == nil { - return ErrNotConnected - } - - close(b.responses) - <-b.done - - err := b.conn.Close() - - b.conn = nil - b.connErr = nil - b.done = nil - b.responses = nil - - if b.id >= 0 { - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b)) - } - - if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) - } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) - } - - atomic.StoreInt32(&b.opened, 0) - - return err -} - -// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. -func (b *Broker) ID() int32 { - return b.id -} - -// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. -func (b *Broker) Addr() string { - return b.addr -} - -func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { - response := new(MetadataResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { - response := new(ConsumerMetadataResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { - response := new(OffsetResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { - var response *ProduceResponse - var err error - - if request.RequiredAcks == NoResponse { - err = b.sendAndReceive(request, nil) - } else { - response = new(ProduceResponse) - err = b.sendAndReceive(request, response) - } - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { - response := new(FetchResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { - response := new(OffsetCommitResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { - response := new(OffsetFetchResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { - response := new(JoinGroupResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, 
error) { - response := new(SyncGroupResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { - response := new(LeaveGroupResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { - response := new(HeartbeatResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { - response := new(ListGroupsResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { - response := new(DescribeGroupsResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { - response := new(ApiVersionsResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.conn == nil { - if b.connErr != nil { - return nil, b.connErr - } - return nil, ErrNotConnected - } - - if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { - return nil, ErrUnsupportedVersion - } - - req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return nil, err - } - - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - return nil, err - } - - requestTime := time.Now() - bytes, err := b.conn.Write(buf) - b.updateOutgoingCommunicationMetrics(bytes) - if err != nil { - return nil, err - } - b.correlationID++ - - if !promiseResponse { - // Record request latency without the response - b.updateRequestLatencyMetrics(time.Since(requestTime)) - return nil, nil - } - - promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} - b.responses <- promise - - return &promise, nil -} - -func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { - promise, err := b.send(req, res != nil) - - if err != nil { - return err - } - - if promise == nil { - return nil - } - - select { - case buf := <-promise.packets: - return versionedDecode(buf, res, req.version()) - case err = <-promise.errors: - return err - } -} - -func (b *Broker) decode(pd packetDecoder) (err error) { - b.id, err = pd.getInt32() - if err != nil { - return err - } - - host, err := pd.getString() - if err != nil { - return err - } - - port, err := pd.getInt32() - if err != nil { - return err - } - - b.addr = net.JoinHostPort(host, fmt.Sprint(port)) - if _, _, err := net.SplitHostPort(b.addr); err != nil { - return err - } - - return nil -} - -func (b *Broker) encode(pe packetEncoder) (err error) { - - host, portstr, err := net.SplitHostPort(b.addr) - if err != nil { - return err - } - port, err := strconv.Atoi(portstr) - if err != nil { - return err - } - - pe.putInt32(b.id) - - err = pe.putString(host) - if err != nil { - return err - 
} - - pe.putInt32(int32(port)) - - return nil -} - -func (b *Broker) responseReceiver() { - var dead error - header := make([]byte, 8) - for response := range b.responses { - if dead != nil { - response.errors <- dead - continue - } - - err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) - if err != nil { - dead = err - response.errors <- err - continue - } - - bytesReadHeader, err := io.ReadFull(b.conn, header) - requestLatency := time.Since(response.requestTime) - if err != nil { - b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) - dead = err - response.errors <- err - continue - } - - decodedHeader := responseHeader{} - err = decode(header, &decodedHeader) - if err != nil { - b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) - dead = err - response.errors <- err - continue - } - if decodedHeader.correlationID != response.correlationID { - b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) - // TODO if decoded ID < cur ID, discard until we catch up - // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response - dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} - response.errors <- dead - continue - } - - buf := make([]byte, decodedHeader.length-4) - bytesReadBody, err := io.ReadFull(b.conn, buf) - b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) - if err != nil { - dead = err - response.errors <- err - continue - } - - response.packets <- buf - } - close(b.done) -} - -func (b *Broker) sendAndReceiveSASLPlainHandshake() error { - rb := &SaslHandshakeRequest{"PLAIN"} - req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return err - } - - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - return err - } - - requestTime := time.Now() - bytes, err := b.conn.Write(buf) - b.updateOutgoingCommunicationMetrics(bytes) - if err != nil { - Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) - return err - } - b.correlationID++ - //wait for the response - header := make([]byte, 8) // response header - _, err = io.ReadFull(b.conn, header) - if err != nil { - Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) - return err - } - length := binary.BigEndian.Uint32(header[:4]) - payload := make([]byte, length-4) - n, err := io.ReadFull(b.conn, payload) - if err != nil { - Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) - return err - } - b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) - res := &SaslHandshakeResponse{} - err = versionedDecode(payload, res, 0) - if err != nil { - Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) - return err - } - if res.Err != ErrNoError { - Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) - return res.Err - } - Logger.Print("Successful SASL handshake") - return nil -} - -// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) -// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 -// -// In SASL Plain, Kafka expects the auth header to be in the following format -// Message format (from https://tools.ietf.org/html/rfc4616): -// -// message = [authzid] UTF8NUL authcid UTF8NUL passwd -// authcid = 
1*SAFE ; MUST accept up to 255 octets
-//	authzid   = 1*SAFE ; MUST accept up to 255 octets
-//	passwd    = 1*SAFE ; MUST accept up to 255 octets
-//	UTF8NUL   = %x00 ; UTF-8 encoded NUL character
-//
-//	SAFE      = UTF1 / UTF2 / UTF3 / UTF4
-//			;; any UTF-8 encoded Unicode character except NUL
-//
-// When credentials are valid, Kafka returns a 4 byte array of null characters.
-// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
-// of responding to bad credentials but thats how its being done today.
-func (b *Broker) sendAndReceiveSASLPlainAuth() error {
-	if b.conf.Net.SASL.Handshake {
-		handshakeErr := b.sendAndReceiveSASLPlainHandshake()
-		if handshakeErr != nil {
-			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
-			return handshakeErr
-		}
-	}
-	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
-	authBytes := make([]byte, length+4) //4 byte length header + auth data
-	binary.BigEndian.PutUint32(authBytes, uint32(length))
-	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
-
-	err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	requestTime := time.Now()
-	bytesWritten, err := b.conn.Write(authBytes)
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
-	if err != nil {
-		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	header := make([]byte, 4)
-	n, err := io.ReadFull(b.conn, header)
-	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
-	// If the credentials are valid, we would get a 4 byte response filled with null characters.
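The framing built above is small enough to restate on its own: a 4-byte big-endian length, then NUL-separated fields with an empty authzid. A standalone sketch mirroring the deleted code (not part of this patch):

package main

import (
	"encoding/binary"
	"fmt"
)

// saslPlainPayload frames credentials the same way the deleted code does:
// a 4-byte big-endian length, then [authzid] NUL authcid NUL passwd with authzid empty.
func saslPlainPayload(user, pass string) []byte {
	body := "\x00" + user + "\x00" + pass
	buf := make([]byte, 4+len(body))
	binary.BigEndian.PutUint32(buf, uint32(len(body)))
	copy(buf[4:], body)
	return buf
}

func main() {
	fmt.Printf("% x\n", saslPlainPayload("user", "pass")) // placeholder credentials
}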
- // Otherwise, the broker closes the connection and we get an EOF - if err != nil { - Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) - return err - } - - Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) - return nil -} - -func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { - b.updateRequestLatencyMetrics(requestLatency) - b.responseRate.Mark(1) - if b.brokerResponseRate != nil { - b.brokerResponseRate.Mark(1) - } - responseSize := int64(bytes) - b.incomingByteRate.Mark(responseSize) - if b.brokerIncomingByteRate != nil { - b.brokerIncomingByteRate.Mark(responseSize) - } - b.responseSize.Update(responseSize) - if b.brokerResponseSize != nil { - b.brokerResponseSize.Update(responseSize) - } -} - -func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { - requestLatencyInMs := int64(requestLatency / time.Millisecond) - b.requestLatency.Update(requestLatencyInMs) - if b.brokerRequestLatency != nil { - b.brokerRequestLatency.Update(requestLatencyInMs) - } -} - -func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { - b.requestRate.Mark(1) - if b.brokerRequestRate != nil { - b.brokerRequestRate.Mark(1) - } - requestSize := int64(bytes) - b.outgoingByteRate.Mark(requestSize) - if b.brokerOutgoingByteRate != nil { - b.brokerOutgoingByteRate.Mark(requestSize) - } - b.requestSize.Update(requestSize) - if b.brokerRequestSize != nil { - b.brokerRequestSize.Update(requestSize) - } -} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go deleted file mode 100644 index 3dbfc4b06f..0000000000 --- a/vendor/github.com/Shopify/sarama/client.go +++ /dev/null @@ -1,794 +0,0 @@ -package sarama - -import ( - "math/rand" - "sort" - "sync" - "time" -) - -// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. -// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected -// automatically when it passes out of scope. It is safe to share a client amongst many -// users, however Kafka will process requests from a single client strictly in serial, -// so it is generally more efficient to use the default one client per producer/consumer. -type Client interface { - // Config returns the Config struct of the client. This struct should not be - // altered after it has been created. - Config() *Config - - // Brokers returns the current set of active brokers as retrieved from cluster metadata. - Brokers() []*Broker - - // Topics returns the set of available topics as retrieved from cluster metadata. - Topics() ([]string, error) - - // Partitions returns the sorted list of all partition IDs for the given topic. - Partitions(topic string) ([]int32, error) - - // WritablePartitions returns the sorted list of all writable partition IDs for - // the given topic, where "writable" means "having a valid leader accepting - // writes". - WritablePartitions(topic string) ([]int32, error) - - // Leader returns the broker object that is the leader of the current - // topic/partition, as determined by querying the cluster metadata. - Leader(topic string, partitionID int32) (*Broker, error) - - // Replicas returns the set of all replica IDs for the given partition. - Replicas(topic string, partitionID int32) ([]int32, error) - - // InSyncReplicas returns the set of all in-sync replica IDs for the given - // partition. 
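The meter and histogram updates above all target conf.MetricRegistry, a rcrowley/go-metrics Registry, so callers can read them back. A sketch, assuming the "request-rate" name registered by the broker code above:

package main

import (
	"log"

	"github.com/Shopify/sarama"
	metrics "github.com/rcrowley/go-metrics"
)

func logRequestRate(conf *sarama.Config) {
	// "request-rate" is registered above via GetOrRegisterMeter; Get returns nil until then.
	if m, ok := conf.MetricRegistry.Get("request-rate").(metrics.Meter); ok {
		log.Printf("requests/sec (1-minute rate): %.2f", m.Rate1())
	}
}

func main() {
	logRequestRate(sarama.NewConfig())
}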
In-sync replicas are replicas which are fully caught up with - // the partition leader. - InSyncReplicas(topic string, partitionID int32) ([]int32, error) - - // RefreshMetadata takes a list of topics and queries the cluster to refresh the - // available metadata for those topics. If no topics are provided, it will refresh - // metadata for all topics. - RefreshMetadata(topics ...string) error - - // GetOffset queries the cluster to get the most recent available offset at the - // given time (in milliseconds) on the topic/partition combination. - // Time should be OffsetOldest for the earliest available offset, - // OffsetNewest for the offset of the message that will be produced next, or a time. - GetOffset(topic string, partitionID int32, time int64) (int64, error) - - // Coordinator returns the coordinating broker for a consumer group. It will - // return a locally cached value if it's available. You can call - // RefreshCoordinator to update the cached value. This function only works on - // Kafka 0.8.2 and higher. - Coordinator(consumerGroup string) (*Broker, error) - - // RefreshCoordinator retrieves the coordinator for a consumer group and stores it - // in local cache. This function only works on Kafka 0.8.2 and higher. - RefreshCoordinator(consumerGroup string) error - - // Close shuts down all broker connections managed by this client. It is required - // to call this function before a client object passes out of scope, as it will - // otherwise leak memory. You must close any Producers or Consumers using a client - // before you close the client. - Close() error - - // Closed returns true if the client has already had Close called on it - Closed() bool -} - -const ( - // OffsetNewest stands for the log head offset, i.e. the offset that will be - // assigned to the next message that will be produced to the partition. You - // can send this to a client's GetOffset method to get this offset, or when - // calling ConsumePartition to start consuming new messages. - OffsetNewest int64 = -1 - // OffsetOldest stands for the oldest offset available on the broker for a - // partition. You can send this to a client's GetOffset method to get this - // offset, or when calling ConsumePartition to start consuming from the - // oldest offset that is still available on the broker. - OffsetOldest int64 = -2 -) - -type client struct { - conf *Config - closer, closed chan none // for shutting down background metadata updater - - // the broker addresses given to us through the constructor are not guaranteed to be returned in - // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) - // so we store them separately - seedBrokers []*Broker - deadSeeds []*Broker - - brokers map[int32]*Broker // maps broker ids to brokers - metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata - coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs - - // If the number of partitions is large, we can get some churn calling cachedPartitions, - // so the result is cached. It is important to update this value whenever metadata is changed - cachedPartitionsResults map[string][maxPartitionIndex][]int32 - - lock sync.RWMutex // protects access to the maps that hold cluster state. -} - -// NewClient creates a new Client. It connects to one of the given broker addresses -// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. 
If metadata cannot -// be retrieved from any of the given broker addresses, the client is not created. -func NewClient(addrs []string, conf *Config) (Client, error) { - Logger.Println("Initializing new client") - - if conf == nil { - conf = NewConfig() - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - if len(addrs) < 1 { - return nil, ConfigurationError("You must provide at least one broker address") - } - - client := &client{ - conf: conf, - closer: make(chan none), - closed: make(chan none), - brokers: make(map[int32]*Broker), - metadata: make(map[string]map[int32]*PartitionMetadata), - cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), - coordinators: make(map[string]int32), - } - - random := rand.New(rand.NewSource(time.Now().UnixNano())) - for _, index := range random.Perm(len(addrs)) { - client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) - } - - if conf.Metadata.Full { - // do an initial fetch of all cluster metadata by specifying an empty list of topics - err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: - // indicates that maybe part of the cluster is down, but is not fatal to creating the client - Logger.Println(err) - default: - close(client.closed) // we haven't started the background updater yet, so we have to do this manually - _ = client.Close() - return nil, err - } - } - go withRecover(client.backgroundMetadataUpdater) - - Logger.Println("Successfully initialized new client") - - return client, nil -} - -func (client *client) Config() *Config { - return client.conf -} - -func (client *client) Brokers() []*Broker { - client.lock.RLock() - defer client.lock.RUnlock() - brokers := make([]*Broker, 0) - for _, broker := range client.brokers { - brokers = append(brokers, broker) - } - return brokers -} - -func (client *client) Close() error { - if client.Closed() { - // Chances are this is being called from a defer() and the error will go unobserved - // so we go ahead and log the event in this case. 
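Taken together with NewClient above, a typical standalone client session looks like the following sketch (address and topic are placeholders; a nil config falls back to NewConfig(), as shown above):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"kafka-1:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // required: clients are never cleaned up implicitly

	partitions, err := client.Partitions("events") // placeholder topic
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range partitions {
		next, err := client.GetOffset("events", p, sarama.OffsetNewest)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("partition %d: next offset %d", p, next)
	}
}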
- Logger.Printf("Close() called on already closed client") - return ErrClosedClient - } - - // shutdown and wait for the background thread before we take the lock, to avoid races - close(client.closer) - <-client.closed - - client.lock.Lock() - defer client.lock.Unlock() - Logger.Println("Closing Client") - - for _, broker := range client.brokers { - safeAsyncClose(broker) - } - - for _, broker := range client.seedBrokers { - safeAsyncClose(broker) - } - - client.brokers = nil - client.metadata = nil - - return nil -} - -func (client *client) Closed() bool { - return client.brokers == nil -} - -func (client *client) Topics() ([]string, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - client.lock.RLock() - defer client.lock.RUnlock() - - ret := make([]string, 0, len(client.metadata)) - for topic := range client.metadata { - ret = append(ret, topic) - } - - return ret, nil -} - -func (client *client) Partitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, allPartitions) - - if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, allPartitions) - } - - if partitions == nil { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil -} - -func (client *client) WritablePartitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, writablePartitions) - - // len==0 catches when it's nil (no such topic) and the odd case when every single - // partition is undergoing leader election simultaneously. Callers have to be able to handle - // this function returning an empty slice (which is a valid return value) but catching it - // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers - // a metadata refresh as a nicety so callers can just try again and don't have to manually - // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
- if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, writablePartitions) - } - - if partitions == nil { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil -} - -func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if metadata.Err == ErrReplicaNotAvailable { - return dupInt32Slice(metadata.Replicas), metadata.Err - } - return dupInt32Slice(metadata.Replicas), nil -} - -func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if metadata.Err == ErrReplicaNotAvailable { - return dupInt32Slice(metadata.Isr), metadata.Err - } - return dupInt32Slice(metadata.Isr), nil -} - -func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - leader, err := client.cachedLeader(topic, partitionID) - - if leader == nil { - err = client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - leader, err = client.cachedLeader(topic, partitionID) - } - - return leader, err -} - -func (client *client) RefreshMetadata(topics ...string) error { - if client.Closed() { - return ErrClosedClient - } - - // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper - // error. This handles the case by returning an error instead of sending it - // off to Kafka. 
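Replicas and InSyncReplicas above can return partial results together with ErrReplicaNotAvailable; a sketch that uses the pair to flag under-replication under that assumption (not part of this patch):

package kafkacheck

import (
	"github.com/Shopify/sarama"
)

// underReplicated reports whether fewer replicas are in sync than assigned.
// ErrReplicaNotAvailable is tolerated because, per the code above, it can
// accompany partial results rather than signal total failure.
func underReplicated(client sarama.Client, topic string, partition int32) (bool, error) {
	replicas, err := client.Replicas(topic, partition)
	if err != nil && err != sarama.ErrReplicaNotAvailable {
		return false, err
	}
	isr, err := client.InSyncReplicas(topic, partition)
	if err != nil && err != sarama.ErrReplicaNotAvailable {
		return false, err
	}
	return len(isr) < len(replicas), nil
}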
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 - for _, topic := range topics { - if len(topic) == 0 { - return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return - } - } - - return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) -} - -func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { - if client.Closed() { - return -1, ErrClosedClient - } - - offset, err := client.getOffset(topic, partitionID, time) - - if err != nil { - if err := client.RefreshMetadata(topic); err != nil { - return -1, err - } - return client.getOffset(topic, partitionID, time) - } - - return offset, err -} - -func (client *client) Coordinator(consumerGroup string) (*Broker, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - coordinator := client.cachedCoordinator(consumerGroup) - - if coordinator == nil { - if err := client.RefreshCoordinator(consumerGroup); err != nil { - return nil, err - } - coordinator = client.cachedCoordinator(consumerGroup) - } - - if coordinator == nil { - return nil, ErrConsumerCoordinatorNotAvailable - } - - _ = coordinator.Open(client.conf) - return coordinator, nil -} - -func (client *client) RefreshCoordinator(consumerGroup string) error { - if client.Closed() { - return ErrClosedClient - } - - response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) - if err != nil { - return err - } - - client.lock.Lock() - defer client.lock.Unlock() - client.registerBroker(response.Coordinator) - client.coordinators[consumerGroup] = response.Coordinator.ID() - return nil -} - -// private broker management helpers - -// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered -// in the brokers map. It returns the broker that is registered, which may be the provided broker, -// or a previously registered Broker instance. You must hold the write lock before calling this function. -func (client *client) registerBroker(broker *Broker) { - if client.brokers[broker.ID()] == nil { - client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) - } else if broker.Addr() != client.brokers[broker.ID()].Addr() { - safeAsyncClose(client.brokers[broker.ID()]) - client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) - } -} - -// deregisterBroker removes a broker from the seedsBroker list, and if it's -// not the seedbroker, removes it from brokers map completely. 
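Coordinator above resolves a consumer group to its coordinating broker through the same cache-then-refresh pattern as the leader lookup; a short sketch (the group name is a placeholder):

package kafkacheck

import (
	"log"

	"github.com/Shopify/sarama"
)

func logCoordinator(client sarama.Client) {
	coordinator, err := client.Coordinator("my-group") // cached; RefreshCoordinator updates it
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("coordinator for my-group: broker #%d at %s", coordinator.ID(), coordinator.Addr())
}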
-func (client *client) deregisterBroker(broker *Broker) { - client.lock.Lock() - defer client.lock.Unlock() - - if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { - client.deadSeeds = append(client.deadSeeds, broker) - client.seedBrokers = client.seedBrokers[1:] - } else { - // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, - // but we really shouldn't have to; once that loop is made better this case can be - // removed, and the function generally can be renamed from `deregisterBroker` to - // `nextSeedBroker` or something - Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) - delete(client.brokers, broker.ID()) - } -} - -func (client *client) resurrectDeadBrokers() { - client.lock.Lock() - defer client.lock.Unlock() - - Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) - client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) - client.deadSeeds = nil -} - -func (client *client) any() *Broker { - client.lock.RLock() - defer client.lock.RUnlock() - - if len(client.seedBrokers) > 0 { - _ = client.seedBrokers[0].Open(client.conf) - return client.seedBrokers[0] - } - - // not guaranteed to be random *or* deterministic - for _, broker := range client.brokers { - _ = broker.Open(client.conf) - return broker - } - - return nil -} - -// private caching/lazy metadata helpers - -type partitionType int - -const ( - allPartitions partitionType = iota - writablePartitions - // If you add any more types, update the partition cache in update() - - // Ensure this is the last partition type value - maxPartitionIndex -) - -func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions := client.metadata[topic] - if partitions != nil { - return partitions[partitionID] - } - - return nil -} - -func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions, exists := client.cachedPartitionsResults[topic] - - if !exists { - return nil - } - return partitions[partitionSet] -} - -func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { - partitions := client.metadata[topic] - - if partitions == nil { - return nil - } - - ret := make([]int32, 0, len(partitions)) - for _, partition := range partitions { - if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { - continue - } - ret = append(ret, partition.ID) - } - - sort.Sort(int32Slice(ret)) - return ret -} - -func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions := client.metadata[topic] - if partitions != nil { - metadata, ok := partitions[partitionID] - if ok { - if metadata.Err == ErrLeaderNotAvailable { - return nil, ErrLeaderNotAvailable - } - b := client.brokers[metadata.Leader] - if b == nil { - return nil, ErrLeaderNotAvailable - } - _ = b.Open(client.conf) - return b, nil - } - } - - return nil, ErrUnknownTopicOrPartition -} - -func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { - broker, err := client.Leader(topic, partitionID) - if err != nil { - return -1, err - } - - request := &OffsetRequest{} - if client.conf.Version.IsAtLeast(V0_10_1_0) { - request.Version = 1 - } - request.AddBlock(topic, partitionID, time, 1) - - response, err 
:= broker.GetAvailableOffsets(request) - if err != nil { - _ = broker.Close() - return -1, err - } - - block := response.GetBlock(topic, partitionID) - if block == nil { - _ = broker.Close() - return -1, ErrIncompleteResponse - } - if block.Err != ErrNoError { - return -1, block.Err - } - if len(block.Offsets) != 1 { - return -1, ErrOffsetOutOfRange - } - - return block.Offsets[0], nil -} - -// core metadata update logic - -func (client *client) backgroundMetadataUpdater() { - defer close(client.closed) - - if client.conf.Metadata.RefreshFrequency == time.Duration(0) { - return - } - - ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - topics := []string{} - if !client.conf.Metadata.Full { - if specificTopics, err := client.Topics(); err != nil { - Logger.Println("Client background metadata topic load:", err) - break - } else if len(specificTopics) == 0 { - Logger.Println("Client background metadata update: no specific topics to update") - break - } else { - topics = specificTopics - } - } - - if err := client.RefreshMetadata(topics...); err != nil { - Logger.Println("Client background metadata update:", err) - } - case <-client.closer: - return - } - } -} - -func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { - retry := func(err error) error { - if attemptsRemaining > 0 { - Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) - return client.tryRefreshMetadata(topics, attemptsRemaining-1) - } - return err - } - - for broker := client.any(); broker != nil; broker = client.any() { - if len(topics) > 0 { - Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) - } else { - Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) - } - response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) - - switch err.(type) { - case nil: - // valid response, use it - shouldRetry, err := client.updateMetadata(response) - if shouldRetry { - Logger.Println("client/metadata found some partitions to be leaderless") - return retry(err) // note: err can be nil - } - return err - - case PacketEncodingError: - // didn't even send, return the error - return err - default: - // some other error, remove that broker and try again - Logger.Println("client/metadata got error from broker while fetching metadata:", err) - _ = broker.Close() - client.deregisterBroker(broker) - } - } - - Logger.Println("client/metadata no available broker to send metadata request to") - client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) -} - -// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable -func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { - client.lock.Lock() - defer client.lock.Unlock() - - // For all the brokers we received: - // - if it is a new ID, save it - // - if it is an existing ID, but the address we have is stale, discard the old one and save it - // - otherwise ignore it, replacing our existing one would just bounce the connection - for _, broker := range data.Brokers { - client.registerBroker(broker) - } - - for _, topic := range data.Topics { - delete(client.metadata, topic.Name) - delete(client.cachedPartitionsResults, topic.Name) - - switch topic.Err { - case ErrNoError: - break 
- case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results - err = topic.Err - continue - case ErrUnknownTopicOrPartition: // retry, do not store partial partition results - err = topic.Err - retry = true - continue - case ErrLeaderNotAvailable: // retry, but store partial partition results - retry = true - break - default: // don't retry, don't store partial results - Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) - err = topic.Err - continue - } - - client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) - for _, partition := range topic.Partitions { - client.metadata[topic.Name][partition.ID] = partition - if partition.Err == ErrLeaderNotAvailable { - retry = true - } - } - - var partitionCache [maxPartitionIndex][]int32 - partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) - partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) - client.cachedPartitionsResults[topic.Name] = partitionCache - } - - return -} - -func (client *client) cachedCoordinator(consumerGroup string) *Broker { - client.lock.RLock() - defer client.lock.RUnlock() - if coordinatorID, ok := client.coordinators[consumerGroup]; ok { - return client.brokers[coordinatorID] - } - return nil -} - -func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) { - retry := func(err error) (*ConsumerMetadataResponse, error) { - if attemptsRemaining > 0 { - Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) - return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) - } - return nil, err - } - - for broker := client.any(); broker != nil; broker = client.any() { - Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) - - request := new(ConsumerMetadataRequest) - request.ConsumerGroup = consumerGroup - - response, err := broker.GetConsumerMetadata(request) - - if err != nil { - Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) - - switch err.(type) { - case PacketEncodingError: - return nil, err - default: - _ = broker.Close() - client.deregisterBroker(broker) - continue - } - } - - switch response.Err { - case ErrNoError: - Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) - return response, nil - - case ErrConsumerCoordinatorNotAvailable: - Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) - - // This is very ugly, but this scenario will only happen once per cluster. - // The __consumer_offsets topic only has to be created one time. - // The number of partitions is not configurable, but partition 0 should always exist. - if _, err := client.Leader("__consumer_offsets", 0); err != nil { - Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet.
Waiting 2 seconds...\n") - time.Sleep(2 * time.Second) - } - - return retry(ErrConsumerCoordinatorNotAvailable) - default: - return nil, response.Err - } - } - - Logger.Println("client/coordinator no available broker to send consumer metadata request to") - client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) -} diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go deleted file mode 100644 index e4ff680f2f..0000000000 --- a/vendor/github.com/Shopify/sarama/config.go +++ /dev/null @@ -1,442 +0,0 @@ -package sarama - -import ( - "crypto/tls" - "regexp" - "time" - - "github.com/rcrowley/go-metrics" -) - -const defaultClientID = "sarama" - -var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) - -// Config is used to pass multiple configuration options to Sarama's constructors. -type Config struct { - // Net is the namespace for network-level properties used by the Broker, and - // shared by the Client/Producer/Consumer. - Net struct { - // How many outstanding requests a connection is allowed to have before - // sending on it blocks (default 5). - MaxOpenRequests int - - // All three of the below configurations are similar to the - // `socket.timeout.ms` setting in JVM kafka. All of them default - // to 30 seconds. - DialTimeout time.Duration // How long to wait for the initial connection. - ReadTimeout time.Duration // How long to wait for a response. - WriteTimeout time.Duration // How long to wait for a transmit. - - TLS struct { - // Whether or not to use TLS when connecting to the broker - // (defaults to false). - Enable bool - // The TLS configuration to use for secure connections if - // enabled (defaults to nil). - Config *tls.Config - } - - // SASL based authentication with broker. While there are multiple SASL authentication methods - // the current implementation is limited to plaintext (SASL/PLAIN) authentication - SASL struct { - // Whether or not to use SASL authentication when connecting to the broker - // (defaults to false). - Enable bool - // Whether or not to send the Kafka SASL handshake first if enabled - // (defaults to true). You should only set this to false if you're using - // a non-Kafka SASL proxy. - Handshake bool - //username and password for SASL/PLAIN authentication - User string - Password string - } - - // KeepAlive specifies the keep-alive period for an active network connection. - // If zero, keep-alives are disabled. (default is 0: disabled). - KeepAlive time.Duration - } - - // Metadata is the namespace for metadata management properties used by the - // Client, and shared by the Producer/Consumer. - Metadata struct { - Retry struct { - // The total number of times to retry a metadata request when the - // cluster is in the middle of a leader election (default 3). - Max int - // How long to wait for leader election to occur before retrying - // (default 250ms). Similar to the JVM's `retry.backoff.ms`. - Backoff time.Duration - } - // How frequently to refresh the cluster metadata in the background. - // Defaults to 10 minutes. Set to 0 to disable. Similar to - // `topic.metadata.refresh.interval.ms` in the JVM version. - RefreshFrequency time.Duration - - // Whether to maintain a full set of metadata for all topics, or just - // the minimal set that has been necessary so far. The full set is simpler - // and usually more convenient, but can take up a substantial amount of - // memory if you have many topics and partitions. Defaults to true. 
- Full bool - } - - // Producer is the namespace for configuration related to producing messages, - // used by the Producer. - Producer struct { - // The maximum permitted size of a message (defaults to 1000000). Should be - // set equal to or smaller than the broker's `message.max.bytes`. - MaxMessageBytes int - // The level of acknowledgement reliability needed from the broker (defaults - // to WaitForLocal). Equivalent to the `request.required.acks` setting of the - // JVM producer. - RequiredAcks RequiredAcks - // The maximum duration the broker will wait for the receipt of the number of - // RequiredAcks (defaults to 10 seconds). This is only relevant when - // RequiredAcks is set to WaitForAll or a number > 1. Only supports - // millisecond resolution, nanoseconds will be truncated. Equivalent to - // the JVM producer's `request.timeout.ms` setting. - Timeout time.Duration - // The type of compression to use on messages (defaults to no compression). - // Similar to `compression.codec` setting of the JVM producer. - Compression CompressionCodec - // Generates partitioners for choosing the partition to send messages to - // (defaults to hashing the message key). Similar to the `partitioner.class` - // setting for the JVM producer. - Partitioner PartitionerConstructor - - // Return specifies what channels will be populated. If they are set to true, - // you must read from the respective channels to prevent deadlock. If, - // however, this config is used to create a `SyncProducer`, both must be set - // to true and you shall not read from the channels since the producer does - // this internally. - Return struct { - // If enabled, successfully delivered messages will be returned on the - // Successes channel (default disabled). - Successes bool - - // If enabled, messages that failed to deliver will be returned on the - // Errors channel, including the error (default enabled). - Errors bool - } - - // The following config options control how often messages are batched up and - // sent to the broker. By default, messages are sent as fast as possible, and - // all messages received while the current batch is in-flight are placed - // into the subsequent batch. - Flush struct { - // The best-effort number of bytes needed to trigger a flush. Use the - // global sarama.MaxRequestSize to set a hard upper limit. - Bytes int - // The best-effort number of messages needed to trigger a flush. Use - // `MaxMessages` to set a hard upper limit. - Messages int - // The best-effort frequency of flushes. Equivalent to - // `queue.buffering.max.ms` setting of JVM producer. - Frequency time.Duration - // The maximum number of messages the producer will send in a single - // broker request. Defaults to 0 for unlimited. Similar to - // `queue.buffering.max.messages` in the JVM producer. - MaxMessages int - } - - Retry struct { - // The total number of times to retry sending a message (default 3). - // Similar to the `message.send.max.retries` setting of the JVM producer. - Max int - // How long to wait for the cluster to settle between retries - // (default 100ms). Similar to the `retry.backoff.ms` setting of the - // JVM producer. - Backoff time.Duration - } - } - - // Consumer is the namespace for configuration related to consuming messages, - // used by the Consumer. - // - // Note that Sarama's Consumer type does not currently support automatic - // consumer-group rebalancing and offset tracking.
For Zookeeper-based - // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka - // library builds on Sarama to add this support. For Kafka-based tracking - // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library - // builds on Sarama to add this support. - Consumer struct { - Retry struct { - // How long to wait after failing to read from a partition before - // trying again (default 2s). - Backoff time.Duration - } - - // Fetch is the namespace for controlling how many bytes are retrieved by any - // given request. - Fetch struct { - // The minimum number of message bytes to fetch in a request - the broker - // will wait until at least this many are available. The default is 1, - // as 0 causes the consumer to spin when no messages are available. - // Equivalent to the JVM's `fetch.min.bytes`. - Min int32 - // The default number of message bytes to fetch from the broker in each - // request (default 32768). This should be larger than the majority of - // your messages, or else the consumer will spend a lot of time - // negotiating sizes and not actually consuming. Similar to the JVM's - // `fetch.message.max.bytes`. - Default int32 - // The maximum number of message bytes to fetch from the broker in a - // single request. Messages larger than this will return - // ErrMessageTooLarge and will not be consumable, so you must be sure - // this is at least as large as your largest message. Defaults to 0 - // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The - // global `sarama.MaxResponseSize` still applies. - Max int32 - } - // The maximum amount of time the broker will wait for Consumer.Fetch.Min - // bytes to become available before it returns fewer than that anyway. The - // default is 250ms, since 0 causes the consumer to spin when no events are - // available. 100-500ms is a reasonable range for most cases. Kafka only - // supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. - MaxWaitTime time.Duration - - // The maximum amount of time the consumer expects a message to take to - // process for the user. If writing to the Messages channel takes longer - // than this, that partition will stop fetching more messages until it - // can proceed again. - // Note that, since the Messages channel is buffered, the actual grace time is - // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. - // If a message is not written to the Messages channel between two ticks - // of the expiryTicker then a timeout is detected. - // Using a ticker instead of a timer to detect timeouts should typically - // result in many fewer calls to Timer functions which may result in a - // significant performance improvement if many messages are being sent - // and timeouts are infrequent. - // The disadvantage of using a ticker instead of a timer is that - // timeouts will be less accurate. That is, the effective timeout could - // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For - // example, if `MaxProcessingTime` is 100ms then a delay of 180ms - // between two messages being sent may not be recognized as a timeout. - MaxProcessingTime time.Duration - - // Return specifies what channels will be populated. If they are set to true, - // you must read from them to prevent deadlock. - Return struct { - // If enabled, any errors that occurred while consuming are returned on - // the Errors channel (default disabled).
- Errors bool - } - - // Offsets specifies configuration for how and when to commit consumed - // offsets. This currently requires the manual use of an OffsetManager - // but will eventually be automated. - Offsets struct { - // How frequently to commit updated offsets. Defaults to 1s. - CommitInterval time.Duration - - // The initial offset to use if no offset was previously committed. - // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. - Initial int64 - - // The retention duration for committed offsets. If zero, disabled - // (in which case the `offsets.retention.minutes` option on the - // broker will be used). Kafka only supports precision up to - // milliseconds; nanoseconds will be truncated. Requires Kafka - // broker version 0.9.0 or later. - // (default is 0: disabled). - Retention time.Duration - } - } - - // A user-provided string sent with every request to the brokers for logging, - // debugging, and auditing purposes. Defaults to "sarama", but you should - // probably set it to something specific to your application. - ClientID string - // The number of events to buffer in internal and external channels. This - // permits the producer and consumer to continue processing some messages - // in the background while user code is working, greatly improving throughput. - // Defaults to 256. - ChannelBufferSize int - // The version of Kafka that Sarama will assume it is running against. - // Defaults to the oldest supported stable version. Since Kafka provides - // backwards-compatibility, setting it to a version older than you have - // will not break anything, although it may prevent you from using the - // latest features. Setting it to a version greater than you are actually - // running may lead to random breakage. - Version KafkaVersion - // The registry to define metrics into. - // Defaults to a local registry. - // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true" - // prior to starting Sarama. - // See Examples on how to use the metrics registry - MetricRegistry metrics.Registry -} - -// NewConfig returns a new configuration instance with sane defaults. -func NewConfig() *Config { - c := &Config{} - - c.Net.MaxOpenRequests = 5 - c.Net.DialTimeout = 30 * time.Second - c.Net.ReadTimeout = 30 * time.Second - c.Net.WriteTimeout = 30 * time.Second - c.Net.SASL.Handshake = true - - c.Metadata.Retry.Max = 3 - c.Metadata.Retry.Backoff = 250 * time.Millisecond - c.Metadata.RefreshFrequency = 10 * time.Minute - c.Metadata.Full = true - - c.Producer.MaxMessageBytes = 1000000 - c.Producer.RequiredAcks = WaitForLocal - c.Producer.Timeout = 10 * time.Second - c.Producer.Partitioner = NewHashPartitioner - c.Producer.Retry.Max = 3 - c.Producer.Retry.Backoff = 100 * time.Millisecond - c.Producer.Return.Errors = true - - c.Consumer.Fetch.Min = 1 - c.Consumer.Fetch.Default = 32768 - c.Consumer.Retry.Backoff = 2 * time.Second - c.Consumer.MaxWaitTime = 250 * time.Millisecond - c.Consumer.MaxProcessingTime = 100 * time.Millisecond - c.Consumer.Return.Errors = false - c.Consumer.Offsets.CommitInterval = 1 * time.Second - c.Consumer.Offsets.Initial = OffsetNewest - - c.ClientID = defaultClientID - c.ChannelBufferSize = 256 - c.Version = minVersion - c.MetricRegistry = metrics.NewRegistry() - - return c -} - -// Validate checks a Config instance. It will return a -// ConfigurationError if the specified values don't make sense. 
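Read together, the defaults in NewConfig and the checks in Validate define the intended calling pattern: start from the defaults, override selected fields, then re-validate. A minimal Go sketch of that pattern against this vendored API, where the client ID and the overridden values are arbitrary placeholders:

package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	// Start from the defaults assigned in NewConfig, then override knobs.
	cfg := sarama.NewConfig()
	cfg.ClientID = "my-app" // Validate warns when the default "sarama" is kept
	cfg.Metadata.RefreshFrequency = 5 * time.Minute
	cfg.Consumer.Return.Errors = true

	// Validate re-applies the sanity checks implemented below.
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid sarama config: %v", err)
	}
}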
-func (c *Config) Validate() error { - // some configuration values should be warned on but not fail completely, do those first - if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { - Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") - } - if c.Net.SASL.Enable == false { - if c.Net.SASL.User != "" { - Logger.Println("Net.SASL is disabled but a non-empty username was provided.") - } - if c.Net.SASL.Password != "" { - Logger.Println("Net.SASL is disabled but a non-empty password was provided.") - } - } - if c.Producer.RequiredAcks > 1 { - Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") - } - if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { - Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.") - } - if c.Producer.Flush.Bytes >= int(MaxRequestSize) { - Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.") - } - if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { - Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.") - } - if c.Producer.Timeout%time.Millisecond != 0 { - Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") - } - if c.Consumer.MaxWaitTime < 100*time.Millisecond { - Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.") - } - if c.Consumer.MaxWaitTime%time.Millisecond != 0 { - Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") - } - if c.Consumer.Offsets.Retention%time.Millisecond != 0 { - Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") - } - if c.ClientID == defaultClientID { - Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") - } - - // validate Net values - switch { - case c.Net.MaxOpenRequests <= 0: - return ConfigurationError("Net.MaxOpenRequests must be > 0") - case c.Net.DialTimeout <= 0: - return ConfigurationError("Net.DialTimeout must be > 0") - case c.Net.ReadTimeout <= 0: - return ConfigurationError("Net.ReadTimeout must be > 0") - case c.Net.WriteTimeout <= 0: - return ConfigurationError("Net.WriteTimeout must be > 0") - case c.Net.KeepAlive < 0: - return ConfigurationError("Net.KeepAlive must be >= 0") - case c.Net.SASL.Enable == true && c.Net.SASL.User == "": - return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") - case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": - return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") - } - - // validate the Metadata values - switch { - case c.Metadata.Retry.Max < 0: - return ConfigurationError("Metadata.Retry.Max must be >= 0") - case c.Metadata.Retry.Backoff < 0: - return ConfigurationError("Metadata.Retry.Backoff must be >= 0") - case c.Metadata.RefreshFrequency < 0: - return ConfigurationError("Metadata.RefreshFrequency must be >= 0") - } - - // validate the Producer values - switch { - case c.Producer.MaxMessageBytes <= 0: - return ConfigurationError("Producer.MaxMessageBytes must be > 0") - case c.Producer.RequiredAcks < -1: - return ConfigurationError("Producer.RequiredAcks must be >= -1") - case c.Producer.Timeout <= 0: - return 
ConfigurationError("Producer.Timeout must be > 0") - case c.Producer.Partitioner == nil: - return ConfigurationError("Producer.Partitioner must not be nil") - case c.Producer.Flush.Bytes < 0: - return ConfigurationError("Producer.Flush.Bytes must be >= 0") - case c.Producer.Flush.Messages < 0: - return ConfigurationError("Producer.Flush.Messages must be >= 0") - case c.Producer.Flush.Frequency < 0: - return ConfigurationError("Producer.Flush.Frequency must be >= 0") - case c.Producer.Flush.MaxMessages < 0: - return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") - case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: - return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") - case c.Producer.Retry.Max < 0: - return ConfigurationError("Producer.Retry.Max must be >= 0") - case c.Producer.Retry.Backoff < 0: - return ConfigurationError("Producer.Retry.Backoff must be >= 0") - } - - if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { - return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") - } - - // validate the Consumer values - switch { - case c.Consumer.Fetch.Min <= 0: - return ConfigurationError("Consumer.Fetch.Min must be > 0") - case c.Consumer.Fetch.Default <= 0: - return ConfigurationError("Consumer.Fetch.Default must be > 0") - case c.Consumer.Fetch.Max < 0: - return ConfigurationError("Consumer.Fetch.Max must be >= 0") - case c.Consumer.MaxWaitTime < 1*time.Millisecond: - return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") - case c.Consumer.MaxProcessingTime <= 0: - return ConfigurationError("Consumer.MaxProcessingTime must be > 0") - case c.Consumer.Retry.Backoff < 0: - return ConfigurationError("Consumer.Retry.Backoff must be >= 0") - case c.Consumer.Offsets.CommitInterval <= 0: - return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") - case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: - return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") - - } - - // validate misc shared values - switch { - case c.ChannelBufferSize < 0: - return ConfigurationError("ChannelBufferSize must be >= 0") - case !validID.MatchString(c.ClientID): - return ConfigurationError("ClientID is invalid") - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go deleted file mode 100644 index 1a07289455..0000000000 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ /dev/null @@ -1,806 +0,0 @@ -package sarama - -import ( - "errors" - "fmt" - "sync" - "sync/atomic" - "time" -) - -// ConsumerMessage encapsulates a Kafka message returned by the consumer. -type ConsumerMessage struct { - Key, Value []byte - Topic string - Partition int32 - Offset int64 - Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp - BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp - Headers []*RecordHeader // only set if kafka is version 0.11+ -} - -// ConsumerError is what is provided to the user when an error occurs. -// It wraps an error and includes the topic and partition. 
-type ConsumerError struct { - Topic string - Partition int32 - Err error -} - -func (ce ConsumerError) Error() string { - return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) -} - -// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. -// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors -// when stopping. -type ConsumerErrors []*ConsumerError - -func (ce ConsumerErrors) Error() string { - return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) -} - -// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() -// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of -// scope. -// -// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. -// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library -// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the -// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. -type Consumer interface { - - // Topics returns the set of available topics as retrieved from the cluster - // metadata. This method is the same as Client.Topics(), and is provided for - // convenience. - Topics() ([]string, error) - - // Partitions returns the sorted list of all partition IDs for the given topic. - // This method is the same as Client.Partitions(), and is provided for convenience. - Partitions(topic string) ([]int32, error) - - // ConsumePartition creates a PartitionConsumer on the given topic/partition with - // the given offset. It will return an error if this Consumer is already consuming - // on the given topic/partition. Offset can be a literal offset, or OffsetNewest - // or OffsetOldest - ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) - - // HighWaterMarks returns the current high water marks for each topic and partition. - // Consistency between partitions is not guaranteed since high water marks are updated separately. - HighWaterMarks() map[string]map[int32]int64 - - // Close shuts down the consumer. It must be called after all child - // PartitionConsumers have already been closed. - Close() error -} - -type consumer struct { - client Client - conf *Config - ownClient bool - - lock sync.Mutex - children map[string]map[int32]*partitionConsumer - brokerConsumers map[*Broker]*brokerConsumer -} - -// NewConsumer creates a new consumer using the given broker addresses and configuration. -func NewConsumer(addrs []string, config *Config) (Consumer, error) { - client, err := NewClient(addrs, config) - if err != nil { - return nil, err - } - - c, err := NewConsumerFromClient(client) - if err != nil { - return nil, err - } - c.(*consumer).ownClient = true - return c, nil -} - -// NewConsumerFromClient creates a new consumer using the given client. It is still -// necessary to call Close() on the underlying client when shutting down this consumer. 
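The ownClient flag above is the only behavioral difference between the two constructors: a consumer built by NewConsumer closes its private client on Close, while one built by NewConsumerFromClient leaves the client's lifetime to the caller. A sketch of both paths, with the broker address as a placeholder:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	brokers := []string{"localhost:9092"} // placeholder address

	// Path 1: the consumer owns its client (ownClient=true), so Close()
	// on the consumer also closes the underlying client.
	owned, err := sarama.NewConsumer(brokers, config)
	if err != nil {
		log.Fatal(err)
	}
	defer owned.Close()

	// Path 2: the client is shared and must be closed by the caller
	// after the consumer built on top of it has been shut down.
	client, err := sarama.NewClient(brokers, config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	shared, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		log.Fatal(err)
	}
	defer shared.Close() // runs before client.Close()
}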
-func NewConsumerFromClient(client Client) (Consumer, error) { - // Check that we are not dealing with a closed Client before processing any other arguments - if client.Closed() { - return nil, ErrClosedClient - } - - c := &consumer{ - client: client, - conf: client.Config(), - children: make(map[string]map[int32]*partitionConsumer), - brokerConsumers: make(map[*Broker]*brokerConsumer), - } - - return c, nil -} - -func (c *consumer) Close() error { - if c.ownClient { - return c.client.Close() - } - return nil -} - -func (c *consumer) Topics() ([]string, error) { - return c.client.Topics() -} - -func (c *consumer) Partitions(topic string) ([]int32, error) { - return c.client.Partitions(topic) -} - -func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { - child := &partitionConsumer{ - consumer: c, - conf: c.conf, - topic: topic, - partition: partition, - messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), - errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), - feeder: make(chan *FetchResponse, 1), - trigger: make(chan none, 1), - dying: make(chan none), - fetchSize: c.conf.Consumer.Fetch.Default, - } - - if err := child.chooseStartingOffset(offset); err != nil { - return nil, err - } - - var leader *Broker - var err error - if leader, err = c.client.Leader(child.topic, child.partition); err != nil { - return nil, err - } - - if err := c.addChild(child); err != nil { - return nil, err - } - - go withRecover(child.dispatcher) - go withRecover(child.responseFeeder) - - child.broker = c.refBrokerConsumer(leader) - child.broker.input <- child - - return child, nil -} - -func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { - c.lock.Lock() - defer c.lock.Unlock() - - hwms := make(map[string]map[int32]int64) - for topic, p := range c.children { - hwm := make(map[int32]int64, len(p)) - for partition, pc := range p { - hwm[partition] = pc.HighWaterMarkOffset() - } - hwms[topic] = hwm - } - - return hwms -} - -func (c *consumer) addChild(child *partitionConsumer) error { - c.lock.Lock() - defer c.lock.Unlock() - - topicChildren := c.children[child.topic] - if topicChildren == nil { - topicChildren = make(map[int32]*partitionConsumer) - c.children[child.topic] = topicChildren - } - - if topicChildren[child.partition] != nil { - return ConfigurationError("That topic/partition is already being consumed") - } - - topicChildren[child.partition] = child - return nil -} - -func (c *consumer) removeChild(child *partitionConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - delete(c.children[child.topic], child.partition) -} - -func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { - c.lock.Lock() - defer c.lock.Unlock() - - bc := c.brokerConsumers[broker] - if bc == nil { - bc = c.newBrokerConsumer(broker) - c.brokerConsumers[broker] = bc - } - - bc.refs++ - - return bc -} - -func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - brokerWorker.refs-- - - if brokerWorker.refs == 0 { - close(brokerWorker.input) - if c.brokerConsumers[brokerWorker.broker] == brokerWorker { - delete(c.brokerConsumers, brokerWorker.broker) - } - } -} - -func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - delete(c.brokerConsumers, brokerWorker.broker) -} - -// PartitionConsumer - -// PartitionConsumer processes Kafka messages from a given topic and partition. 
You MUST call one of Close() or -// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out -// of scope. -// -// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range -// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported -// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, -// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. -// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set -// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement -// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. -// -// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of -// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process -// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call -// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will -// also drain the Messages channel, harvest all errors & return them once cleanup has completed. -type PartitionConsumer interface { - - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you - // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this - // function, or Close, before a consumer object passes out of scope, as it will otherwise leak memory. You must call - // this before calling Close on the underlying client. - AsyncClose() - - // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain - // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service - // the Messages channel when this function is called, you will be competing with Close for messages; consider - // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes - // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. - Close() error - - // Messages returns the read channel for the messages that are returned by - // the broker. - Messages() <-chan *ConsumerMessage - - // Errors returns a read channel of errors that occurred during consuming, if - // enabled. By default, errors are logged and not returned over this channel. - // If you want to implement any custom error handling, set your config's - // Consumer.Return.Errors setting to true, and read from this channel. - Errors() <-chan *ConsumerError - - // HighWaterMarkOffset returns the high water mark offset of the partition, - // i.e. the offset that will be used for the next message that will be produced. - // You can use this to determine how far behind the processing is.
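The contract spelled out above for Messages, Errors, and AsyncClose implies the canonical consumption loop. A hedged sketch follows; the topic, partition, starting offset, and stop condition are illustrative only:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true // deliver errors on the Errors channel

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Offset may be a literal offset, OffsetNewest, or OffsetOldest.
	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}

	consumed := 0
	for msg := range pc.Messages() {
		log.Printf("offset %d: %s", msg.Offset, msg.Value)
		if consumed++; consumed == 10 { // placeholder stop condition
			pc.AsyncClose() // tear-down closes Messages, ending this loop
		}
	}

	// Messages is closed; drain Errors per the AsyncClose contract above.
	for consumeErr := range pc.Errors() {
		log.Println("consume error:", consumeErr)
	}
}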
- HighWaterMarkOffset() int64 -} - -type partitionConsumer struct { - highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG - consumer *consumer - conf *Config - topic string - partition int32 - - broker *brokerConsumer - messages chan *ConsumerMessage - errors chan *ConsumerError - feeder chan *FetchResponse - - trigger, dying chan none - responseResult error - - fetchSize int32 - offset int64 -} - -var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing - -func (child *partitionConsumer) sendError(err error) { - cErr := &ConsumerError{ - Topic: child.topic, - Partition: child.partition, - Err: err, - } - - if child.conf.Consumer.Return.Errors { - child.errors <- cErr - } else { - Logger.Println(cErr) - } -} - -func (child *partitionConsumer) dispatcher() { - for range child.trigger { - select { - case <-child.dying: - close(child.trigger) - case <-time.After(child.conf.Consumer.Retry.Backoff): - if child.broker != nil { - child.consumer.unrefBrokerConsumer(child.broker) - child.broker = nil - } - - Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) - if err := child.dispatch(); err != nil { - child.sendError(err) - child.trigger <- none{} - } - } - } - - if child.broker != nil { - child.consumer.unrefBrokerConsumer(child.broker) - } - child.consumer.removeChild(child) - close(child.feeder) -} - -func (child *partitionConsumer) dispatch() error { - if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { - return err - } - - var leader *Broker - var err error - if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { - return err - } - - child.broker = child.consumer.refBrokerConsumer(leader) - - child.broker.input <- child - - return nil -} - -func (child *partitionConsumer) chooseStartingOffset(offset int64) error { - newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) - if err != nil { - return err - } - oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) - if err != nil { - return err - } - - switch { - case offset == OffsetNewest: - child.offset = newestOffset - case offset == OffsetOldest: - child.offset = oldestOffset - case offset >= oldestOffset && offset <= newestOffset: - child.offset = offset - default: - return ErrOffsetOutOfRange - } - - return nil -} - -func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { - return child.messages -} - -func (child *partitionConsumer) Errors() <-chan *ConsumerError { - return child.errors -} - -func (child *partitionConsumer) AsyncClose() { - // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes - // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and - // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will - // also just close itself) - close(child.dying) -} - -func (child *partitionConsumer) Close() error { - child.AsyncClose() - - go withRecover(func() { - for range child.messages { - // drain - } - }) - - var errors ConsumerErrors - for err := range child.errors { - errors = append(errors, err) - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (child *partitionConsumer) HighWaterMarkOffset() int64 { - return atomic.LoadInt64(&child.highWaterMarkOffset) -} - -func (child 
*partitionConsumer) responseFeeder() { - var msgs []*ConsumerMessage - msgSent := false - -feederLoop: - for response := range child.feeder { - msgs, child.responseResult = child.parseResponse(response) - expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) - - for i, msg := range msgs { - messageSelect: - select { - case child.messages <- msg: - msgSent = true - case <-expiryTicker.C: - if !msgSent { - child.responseResult = errTimedOut - child.broker.acks.Done() - for _, msg = range msgs[i:] { - child.messages <- msg - } - child.broker.input <- child - continue feederLoop - } else { - // current message has not been sent, return to select - // statement - msgSent = false - goto messageSelect - } - } - } - - expiryTicker.Stop() - child.broker.acks.Done() - } - - close(child.messages) - close(child.errors) -} - -func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { - var messages []*ConsumerMessage - var incomplete bool - prelude := true - - for _, msgBlock := range msgSet.Messages { - for _, msg := range msgBlock.Messages() { - offset := msg.Offset - if msg.Msg.Version >= 1 { - baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset - offset += baseOffset - } - if prelude && offset < child.offset { - continue - } - prelude = false - - if offset >= child.offset { - messages = append(messages, &ConsumerMessage{ - Topic: child.topic, - Partition: child.partition, - Key: msg.Msg.Key, - Value: msg.Msg.Value, - Offset: offset, - Timestamp: msg.Msg.Timestamp, - BlockTimestamp: msgBlock.Msg.Timestamp, - }) - child.offset = offset + 1 - } else { - incomplete = true - } - } - } - - if incomplete || len(messages) == 0 { - return nil, ErrIncompleteResponse - } - return messages, nil -} - -func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { - var messages []*ConsumerMessage - var incomplete bool - prelude := true - - for _, rec := range batch.Records { - offset := batch.FirstOffset + rec.OffsetDelta - if prelude && offset < child.offset { - continue - } - prelude = false - - if offset >= child.offset { - messages = append(messages, &ConsumerMessage{ - Topic: child.topic, - Partition: child.partition, - Key: rec.Key, - Value: rec.Value, - Offset: offset, - Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), - Headers: rec.Headers, - }) - child.offset = offset + 1 - } else { - incomplete = true - } - } - - if incomplete || len(messages) == 0 { - return nil, ErrIncompleteResponse - } - return messages, nil -} - -func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { - block := response.GetBlock(child.topic, child.partition) - if block == nil { - return nil, ErrIncompleteResponse - } - - if block.Err != ErrNoError { - return nil, block.Err - } - - nRecs, err := block.Records.numRecords() - if err != nil { - return nil, err - } - if nRecs == 0 { - partialTrailingMessage, err := block.Records.isPartial() - if err != nil { - return nil, err - } - // We got no messages. If we got a trailing one then we need to ask for more data. - // Otherwise we just poll again and wait for one to be produced... 
- if partialTrailingMessage { - if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { - // we can't ask for more data, we've hit the configured limit - child.sendError(ErrMessageTooLarge) - child.offset++ // skip this one so we can keep processing future messages - } else { - child.fetchSize *= 2 - if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { - child.fetchSize = child.conf.Consumer.Fetch.Max - } - } - } - - return nil, nil - } - - // we got messages, reset our fetch size in case it was increased for a previous request - child.fetchSize = child.conf.Consumer.Fetch.Default - atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) - - if control, err := block.Records.isControl(); err != nil || control { - return nil, err - } - - if block.Records.recordsType == legacyRecords { - return child.parseMessages(block.Records.msgSet) - } - return child.parseRecords(block.Records.recordBatch) -} - -// brokerConsumer - -type brokerConsumer struct { - consumer *consumer - broker *Broker - input chan *partitionConsumer - newSubscriptions chan []*partitionConsumer - wait chan none - subscriptions map[*partitionConsumer]none - acks sync.WaitGroup - refs int -} - -func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { - bc := &brokerConsumer{ - consumer: c, - broker: broker, - input: make(chan *partitionConsumer), - newSubscriptions: make(chan []*partitionConsumer), - wait: make(chan none), - subscriptions: make(map[*partitionConsumer]none), - refs: 0, - } - - go withRecover(bc.subscriptionManager) - go withRecover(bc.subscriptionConsumer) - - return bc -} - -func (bc *brokerConsumer) subscriptionManager() { - var buffer []*partitionConsumer - - // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer - // goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks - // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give - // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available, - // so the main goroutine can block waiting for work if it has none. - for { - if len(buffer) > 0 { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- buffer: - buffer = nil - case bc.wait <- none{}: - } - } else { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- nil: - } - } - } - -done: - close(bc.wait) - if len(buffer) > 0 { - bc.newSubscriptions <- buffer - } - close(bc.newSubscriptions) -} - -func (bc *brokerConsumer) subscriptionConsumer() { - <-bc.wait // wait for our first piece of work - - // the subscriptionConsumer ensures we will get nil right away if no new subscriptions are available - for newSubscriptions := range bc.newSubscriptions { - bc.updateSubscriptions(newSubscriptions) - - if len(bc.subscriptions) == 0 { - // We're about to be shut down or we're about to receive more subscriptions. - // Either way, the signal just hasn't propagated to our goroutine yet.
- <-bc.wait - continue - } - - response, err := bc.fetchNewMessages() - - if err != nil { - Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) - bc.abort(err) - return - } - - bc.acks.Add(len(bc.subscriptions)) - for child := range bc.subscriptions { - child.feeder <- response - } - bc.acks.Wait() - bc.handleResponses() - } -} - -func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { - for _, child := range newSubscriptions { - bc.subscriptions[child] = none{} - Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) - } - - for child := range bc.subscriptions { - select { - case <-child.dying: - Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) - close(child.trigger) - delete(bc.subscriptions, child) - default: - break - } - } -} - -func (bc *brokerConsumer) handleResponses() { - // handles the response codes left for us by our subscriptions, and abandons ones that have been closed - for child := range bc.subscriptions { - result := child.responseResult - child.responseResult = nil - - switch result { - case nil: - break - case errTimedOut: - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", - bc.broker.ID(), child.topic, child.partition) - delete(bc.subscriptions, child) - case ErrOffsetOutOfRange: - // there's no point in retrying this; it will just fail the same way again - // shut it down and force the user to choose what to do - child.sendError(result) - Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) - close(child.trigger) - delete(bc.subscriptions, child) - case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: - // not an error, but does need redispatching - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", - bc.broker.ID(), child.topic, child.partition, result) - child.trigger <- none{} - delete(bc.subscriptions, child) - default: - // dunno, tell the user and try redispatching - child.sendError(result) - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", - bc.broker.ID(), child.topic, child.partition, result) - child.trigger <- none{} - delete(bc.subscriptions, child) - } - } -} - -func (bc *brokerConsumer) abort(err error) { - bc.consumer.abandonBrokerConsumer(bc) - _ = bc.broker.Close() // we don't care about the error this might return, we already have one - - for child := range bc.subscriptions { - child.sendError(err) - child.trigger <- none{} - } - - for newSubscriptions := range bc.newSubscriptions { - if len(newSubscriptions) == 0 { - <-bc.wait - continue - } - for _, child := range newSubscriptions { - child.sendError(err) - child.trigger <- none{} - } - } -} - -func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { - request := &FetchRequest{ - MinBytes: bc.consumer.conf.Consumer.Fetch.Min, - MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), - } - if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { - request.Version = 2 - } - if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { - request.Version = 3 - request.MaxBytes = MaxResponseSize - } - if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { - request.Version = 4 - request.Isolation = ReadUncommitted // We don't support transactions yet.
- } - - for child := range bc.subscriptions { - request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) - } - - return bc.broker.Fetch(request) -} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go deleted file mode 100644 index 9d92d350a5..0000000000 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ /dev/null @@ -1,94 +0,0 @@ -package sarama - -type ConsumerGroupMemberMetadata struct { - Version int16 - Topics []string - UserData []byte -} - -func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { - pe.putInt16(m.Version) - - if err := pe.putStringArray(m.Topics); err != nil { - return err - } - - if err := pe.putBytes(m.UserData); err != nil { - return err - } - - return nil -} - -func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { - if m.Version, err = pd.getInt16(); err != nil { - return - } - - if m.Topics, err = pd.getStringArray(); err != nil { - return - } - - if m.UserData, err = pd.getBytes(); err != nil { - return - } - - return nil -} - -type ConsumerGroupMemberAssignment struct { - Version int16 - Topics map[string][]int32 - UserData []byte -} - -func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { - pe.putInt16(m.Version) - - if err := pe.putArrayLength(len(m.Topics)); err != nil { - return err - } - - for topic, partitions := range m.Topics { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putInt32Array(partitions); err != nil { - return err - } - } - - if err := pe.putBytes(m.UserData); err != nil { - return err - } - - return nil -} - -func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { - if m.Version, err = pd.getInt16(); err != nil { - return - } - - var topicLen int - if topicLen, err = pd.getArrayLength(); err != nil { - return - } - - m.Topics = make(map[string][]int32, topicLen) - for i := 0; i < topicLen; i++ { - var topic string - if topic, err = pd.getString(); err != nil { - return - } - if m.Topics[topic], err = pd.getInt32Array(); err != nil { - return - } - } - - if m.UserData, err = pd.getBytes(); err != nil { - return - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go deleted file mode 100644 index 483be3354d..0000000000 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ /dev/null @@ -1,26 +0,0 @@ -package sarama - -type ConsumerMetadataRequest struct { - ConsumerGroup string -} - -func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { - return pe.putString(r.ConsumerGroup) -} - -func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { - r.ConsumerGroup, err = pd.getString() - return err -} - -func (r *ConsumerMetadataRequest) key() int16 { - return 10 -} - -func (r *ConsumerMetadataRequest) version() int16 { - return 0 -} - -func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { - return V0_8_2_0 -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go deleted file mode 100644 index 6b9632bbaf..0000000000 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ /dev/null @@ -1,85 +0,0 @@ -package sarama - -import ( - "net" - "strconv" -) - -type ConsumerMetadataResponse struct { - Err KError - Coordinator *Broker - CoordinatorID int32 // 
deprecated: use Coordinator.ID() - CoordinatorHost string // deprecated: use Coordinator.Addr() - CoordinatorPort int32 // deprecated: use Coordinator.Addr() -} - -func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(tmp) - - coordinator := new(Broker) - if err := coordinator.decode(pd); err != nil { - return err - } - if coordinator.addr == ":0" { - return nil - } - r.Coordinator = coordinator - - // this can all go away in 2.0, but we have to fill in deprecated fields to maintain - // backwards compatibility - host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) - if err != nil { - return err - } - port, err := strconv.ParseInt(portstr, 10, 32) - if err != nil { - return err - } - r.CoordinatorID = r.Coordinator.ID() - r.CoordinatorHost = host - r.CoordinatorPort = int32(port) - - return nil -} - -func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if r.Coordinator != nil { - host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) - if err != nil { - return err - } - port, err := strconv.ParseInt(portstr, 10, 32) - if err != nil { - return err - } - pe.putInt32(r.Coordinator.ID()) - if err := pe.putString(host); err != nil { - return err - } - pe.putInt32(int32(port)) - return nil - } - pe.putInt32(r.CoordinatorID) - if err := pe.putString(r.CoordinatorHost); err != nil { - return err - } - pe.putInt32(r.CoordinatorPort) - return nil -} - -func (r *ConsumerMetadataResponse) key() int16 { - return 10 -} - -func (r *ConsumerMetadataResponse) version() int16 { - return 0 -} - -func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { - return V0_8_2_0 -} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go deleted file mode 100644 index 1f144431a8..0000000000 --- a/vendor/github.com/Shopify/sarama/crc32_field.go +++ /dev/null @@ -1,69 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "fmt" - "hash/crc32" -) - -type crcPolynomial int8 - -const ( - crcIEEE crcPolynomial = iota - crcCastagnoli -) - -var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) - -// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. 
-type crc32Field struct { - startOffset int - polynomial crcPolynomial -} - -func (c *crc32Field) saveOffset(in int) { - c.startOffset = in -} - -func (c *crc32Field) reserveLength() int { - return 4 -} - -func newCRC32Field(polynomial crcPolynomial) *crc32Field { - return &crc32Field{polynomial: polynomial} -} - -func (c *crc32Field) run(curOffset int, buf []byte) error { - crc, err := c.crc(curOffset, buf) - if err != nil { - return err - } - binary.BigEndian.PutUint32(buf[c.startOffset:], crc) - return nil -} - -func (c *crc32Field) check(curOffset int, buf []byte) error { - crc, err := c.crc(curOffset, buf) - if err != nil { - return err - } - - expected := binary.BigEndian.Uint32(buf[c.startOffset:]) - if crc != expected { - return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} - } - - return nil -} -func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { - var tab *crc32.Table - switch c.polynomial { - case crcIEEE: - tab = crc32.IEEETable - case crcCastagnoli: - tab = castagnoliTable - default: - return 0, PacketDecodingError{"invalid CRC type"} - } - return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil -} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go deleted file mode 100644 index af321e9946..0000000000 --- a/vendor/github.com/Shopify/sarama/create_partitions_request.go +++ /dev/null @@ -1,121 +0,0 @@ -package sarama - -import "time" - -type CreatePartitionsRequest struct { - TopicPartitions map[string]*TopicPartition - Timeout time.Duration - ValidateOnly bool -} - -func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { - return err - } - - for topic, partition := range c.TopicPartitions { - if err := pe.putString(topic); err != nil { - return err - } - if err := partition.encode(pe); err != nil { - return err - } - } - - pe.putInt32(int32(c.Timeout / time.Millisecond)) - - pe.putBool(c.ValidateOnly) - - return nil -} - -func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - c.TopicPartitions = make(map[string]*TopicPartition, n) - for i := 0; i < n; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - c.TopicPartitions[topic] = new(TopicPartition) - if err := c.TopicPartitions[topic].decode(pd, version); err != nil { - return err - } - } - - timeout, err := pd.getInt32() - if err != nil { - return err - } - c.Timeout = time.Duration(timeout) * time.Millisecond - - if c.ValidateOnly, err = pd.getBool(); err != nil { - return err - } - - return nil -} - -func (r *CreatePartitionsRequest) key() int16 { - return 37 -} - -func (r *CreatePartitionsRequest) version() int16 { - return 0 -} - -func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { - return V1_0_0_0 -} - -type TopicPartition struct { - Count int32 - Assignment [][]int32 -} - -func (t *TopicPartition) encode(pe packetEncoder) error { - pe.putInt32(t.Count) - - if len(t.Assignment) == 0 { - pe.putInt32(-1) - return nil - } - - if err := pe.putArrayLength(len(t.Assignment)); err != nil { - return err - } - - for _, assign := range t.Assignment { - if err := pe.putInt32Array(assign); err != nil { - return err - } - } - - return nil -} - -func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { - if t.Count, err = pd.getInt32(); err != 
nil { - return err - } - - n, err := pd.getInt32() - if err != nil { - return err - } - if n <= 0 { - return nil - } - t.Assignment = make([][]int32, n) - - for i := 0; i < int(n); i++ { - if t.Assignment[i], err = pd.getInt32Array(); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go deleted file mode 100644 index abd621c64e..0000000000 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ /dev/null @@ -1,94 +0,0 @@ -package sarama - -import "time" - -type CreatePartitionsResponse struct { - ThrottleTime time.Duration - TopicPartitionErrors map[string]*TopicPartitionError -} - -func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { - pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) - if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { - return err - } - - for topic, partitionError := range c.TopicPartitionErrors { - if err := pe.putString(topic); err != nil { - return err - } - if err := partitionError.encode(pe); err != nil { - return err - } - } - - return nil -} - -func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { - throttleTime, err := pd.getInt32() - if err != nil { - return err - } - c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond - - n, err := pd.getArrayLength() - if err != nil { - return err - } - - c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) - for i := 0; i < n; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - c.TopicPartitionErrors[topic] = new(TopicPartitionError) - if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { - return err - } - } - - return nil -} - -func (r *CreatePartitionsResponse) key() int16 { - return 37 -} - -func (r *CreatePartitionsResponse) version() int16 { - return 0 -} - -func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { - return V1_0_0_0 -} - -type TopicPartitionError struct { - Err KError - ErrMsg *string -} - -func (t *TopicPartitionError) encode(pe packetEncoder) error { - pe.putInt16(int16(t.Err)) - - if err := pe.putNullableString(t.ErrMsg); err != nil { - return err - } - - return nil -} - -func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { - kerr, err := pd.getInt16() - if err != nil { - return err - } - t.Err = KError(kerr) - - if t.ErrMsg, err = pd.getNullableString(); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go deleted file mode 100644 index 1fb3567770..0000000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ /dev/null @@ -1,30 +0,0 @@ -package sarama - -type DescribeGroupsRequest struct { - Groups []string -} - -func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { - return pe.putStringArray(r.Groups) -} - -func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { - r.Groups, err = pd.getStringArray() - return -} - -func (r *DescribeGroupsRequest) key() int16 { - return 15 -} - -func (r *DescribeGroupsRequest) version() int16 { - return 0 -} - -func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -func (r *DescribeGroupsRequest) AddGroup(group string) { - r.Groups = append(r.Groups, group) -} diff --git 
a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go deleted file mode 100644 index 542b3a9717..0000000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ /dev/null @@ -1,187 +0,0 @@ -package sarama - -type DescribeGroupsResponse struct { - Groups []*GroupDescription -} - -func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(r.Groups)); err != nil { - return err - } - - for _, groupDescription := range r.Groups { - if err := groupDescription.encode(pe); err != nil { - return err - } - } - - return nil -} - -func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Groups = make([]*GroupDescription, n) - for i := 0; i < n; i++ { - r.Groups[i] = new(GroupDescription) - if err := r.Groups[i].decode(pd); err != nil { - return err - } - } - - return nil -} - -func (r *DescribeGroupsResponse) key() int16 { - return 15 -} - -func (r *DescribeGroupsResponse) version() int16 { - return 0 -} - -func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -type GroupDescription struct { - Err KError - GroupId string - State string - ProtocolType string - Protocol string - Members map[string]*GroupMemberDescription -} - -func (gd *GroupDescription) encode(pe packetEncoder) error { - pe.putInt16(int16(gd.Err)) - - if err := pe.putString(gd.GroupId); err != nil { - return err - } - if err := pe.putString(gd.State); err != nil { - return err - } - if err := pe.putString(gd.ProtocolType); err != nil { - return err - } - if err := pe.putString(gd.Protocol); err != nil { - return err - } - - if err := pe.putArrayLength(len(gd.Members)); err != nil { - return err - } - - for memberId, groupMemberDescription := range gd.Members { - if err := pe.putString(memberId); err != nil { - return err - } - if err := groupMemberDescription.encode(pe); err != nil { - return err - } - } - - return nil -} - -func (gd *GroupDescription) decode(pd packetDecoder) (err error) { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - gd.Err = KError(kerr) - - if gd.GroupId, err = pd.getString(); err != nil { - return - } - if gd.State, err = pd.getString(); err != nil { - return - } - if gd.ProtocolType, err = pd.getString(); err != nil { - return - } - if gd.Protocol, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - gd.Members = make(map[string]*GroupMemberDescription) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - - gd.Members[memberId] = new(GroupMemberDescription) - if err := gd.Members[memberId].decode(pd); err != nil { - return err - } - } - - return nil -} - -type GroupMemberDescription struct { - ClientId string - ClientHost string - MemberMetadata []byte - MemberAssignment []byte -} - -func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { - if err := pe.putString(gmd.ClientId); err != nil { - return err - } - if err := pe.putString(gmd.ClientHost); err != nil { - return err - } - if err := pe.putBytes(gmd.MemberMetadata); err != nil { - return err - } - if err := pe.putBytes(gmd.MemberAssignment); err != nil { - return err - } - - return nil -} - -func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { - if gmd.ClientId, err = pd.getString(); err != nil { - 
return - } - if gmd.ClientHost, err = pd.getString(); err != nil { - return - } - if gmd.MemberMetadata, err = pd.getBytes(); err != nil { - return - } - if gmd.MemberAssignment, err = pd.getBytes(); err != nil { - return - } - - return nil -} - -func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { - assignment := new(ConsumerGroupMemberAssignment) - err := decode(gmd.MemberAssignment, assignment) - return assignment, err -} - -func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { - metadata := new(ConsumerGroupMemberMetadata) - err := decode(gmd.MemberMetadata, metadata) - return metadata, err -} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml deleted file mode 100644 index 294fcdb413..0000000000 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ /dev/null @@ -1,10 +0,0 @@ -name: sarama - -up: - - go: - version: '1.9' - -commands: - test: - run: make test - desc: 'run unit tests' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go deleted file mode 100644 index 7ce3bc0f6e..0000000000 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ /dev/null @@ -1,89 +0,0 @@ -package sarama - -import ( - "fmt" - - "github.com/rcrowley/go-metrics" -) - -// Encoder is the interface that wraps the basic Encode method. -// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. -type encoder interface { - encode(pe packetEncoder) error -} - -// Encode takes an Encoder and turns it into bytes while potentially recording metrics. -func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { - if e == nil { - return nil, nil - } - - var prepEnc prepEncoder - var realEnc realEncoder - - err := e.encode(&prepEnc) - if err != nil { - return nil, err - } - - if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { - return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} - } - - realEnc.raw = make([]byte, prepEnc.length) - realEnc.registry = metricRegistry - err = e.encode(&realEnc) - if err != nil { - return nil, err - } - - return realEnc.raw, nil -} - -// Decoder is the interface that wraps the basic Decode method. -// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. -type decoder interface { - decode(pd packetDecoder) error -} - -type versionedDecoder interface { - decode(pd packetDecoder, version int16) error -} - -// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, -// interpreted using Kafka's encoding rules. 
-func decode(buf []byte, in decoder) error { - if buf == nil { - return nil - } - - helper := realDecoder{raw: buf} - err := in.decode(&helper) - if err != nil { - return err - } - - if helper.off != len(buf) { - return PacketDecodingError{"invalid length"} - } - - return nil -} - -func versionedDecode(buf []byte, in versionedDecoder, version int16) error { - if buf == nil { - return nil - } - - helper := realDecoder{raw: buf} - err := in.decode(&helper, version) - if err != nil { - return err - } - - if helper.off != len(buf) { - return PacketDecodingError{"invalid length"} - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go deleted file mode 100644 index b6242cd8e1..0000000000 --- a/vendor/github.com/Shopify/sarama/errors.go +++ /dev/null @@ -1,269 +0,0 @@ -package sarama - -import ( - "errors" - "fmt" -) - -// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored -// or otherwise failed to respond. -var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") - -// ErrClosedClient is the error returned when a method is called on a client that has been closed. -var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") - -// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does -// not contain the expected information. -var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") - -// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index -// (meaning one outside of the range [0...numPartitions-1]). -var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") - -// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. -var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") - -// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. -var ErrNotConnected = errors.New("kafka: broker not connected") - -// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected -// when requesting messages, since as an optimization the server is allowed to return a partial message at the end -// of the message set. -var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") - -// ErrShuttingDown is returned when a producer receives a message during shutdown. -var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") - -// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max -var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") - -// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, -// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. -type PacketEncodingError struct { - Info string -} - -func (err PacketEncodingError) Error() string { - return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) -} - -// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. 
-// This can be a bad CRC or length field, or any other invalid value. -type PacketDecodingError struct { - Info string -} - -func (err PacketDecodingError) Error() string { - return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) -} - -// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) -// when the specified configuration is invalid. -type ConfigurationError string - -func (err ConfigurationError) Error() string { - return "kafka: invalid configuration (" + string(err) + ")" -} - -// KError is the type of error that can be returned directly by the Kafka broker. -// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes -type KError int16 - -// Numeric error codes returned by the Kafka server. -const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrNetworkException KError = 13 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 - ErrInvalidRequiredAcks KError = 21 - ErrIllegalGeneration KError = 22 - ErrInconsistentGroupProtocol KError = 23 - ErrInvalidGroupId KError = 24 - ErrUnknownMemberId KError = 25 - ErrInvalidSessionTimeout KError = 26 - ErrRebalanceInProgress KError = 27 - ErrInvalidCommitOffsetSize KError = 28 - ErrTopicAuthorizationFailed KError = 29 - ErrGroupAuthorizationFailed KError = 30 - ErrClusterAuthorizationFailed KError = 31 - ErrInvalidTimestamp KError = 32 - ErrUnsupportedSASLMechanism KError = 33 - ErrIllegalSASLState KError = 34 - ErrUnsupportedVersion KError = 35 - ErrTopicAlreadyExists KError = 36 - ErrInvalidPartitions KError = 37 - ErrInvalidReplicationFactor KError = 38 - ErrInvalidReplicaAssignment KError = 39 - ErrInvalidConfig KError = 40 - ErrNotController KError = 41 - ErrInvalidRequest KError = 42 - ErrUnsupportedForMessageFormat KError = 43 - ErrPolicyViolation KError = 44 - ErrOutOfOrderSequenceNumber KError = 45 - ErrDuplicateSequenceNumber KError = 46 - ErrInvalidProducerEpoch KError = 47 - ErrInvalidTxnState KError = 48 - ErrInvalidProducerIDMapping KError = 49 - ErrInvalidTransactionTimeout KError = 50 - ErrConcurrentTransactions KError = 51 - ErrTransactionCoordinatorFenced KError = 52 - ErrTransactionalIDAuthorizationFailed KError = 53 - ErrSecurityDisabled KError = 54 - ErrOperationNotAttempted KError = 55 - ErrKafkaStorageError KError = 56 - ErrLogDirNotFound KError = 57 - ErrSASLAuthenticationFailed KError = 58 - ErrUnknownProducerID KError = 59 - ErrReassignmentInProgress KError = 60 -) - -func (err KError) Error() string { - // Error messages stolen/adapted from - // https://kafka.apache.org/protocol#protocol_error_codes - switch err { - case ErrNoError: - return "kafka server: Not an error, why are you printing me?" - case ErrUnknown: - return "kafka server: Unexpected (unknown?) server error." 
- case ErrOffsetOutOfRange: - return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." - case ErrInvalidMessage: - return "kafka server: Message contents does not match its CRC." - case ErrUnknownTopicOrPartition: - return "kafka server: Request was for a topic or partition that does not exist on this broker." - case ErrInvalidMessageSize: - return "kafka server: The message has a negative size." - case ErrLeaderNotAvailable: - return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." - case ErrNotLeaderForPartition: - return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." - case ErrRequestTimedOut: - return "kafka server: Request exceeded the user-specified time limit in the request." - case ErrBrokerNotAvailable: - return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" - case ErrReplicaNotAvailable: - return "kafka server: Replica information not available, one or more brokers are down." - case ErrMessageSizeTooLarge: - return "kafka server: Message was too large, server rejected it to avoid allocation error." - case ErrStaleControllerEpochCode: - return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." - case ErrOffsetMetadataTooLarge: - return "kafka server: Specified a string larger than the configured maximum for offset metadata." - case ErrNetworkException: - return "kafka server: The server disconnected before a response was received." - case ErrOffsetsLoadInProgress: - return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." - case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created." - case ErrNotCoordinatorForConsumer: - return "kafka server: Request was for a consumer group that is not coordinated by this broker." - case ErrInvalidTopic: - return "kafka server: The request attempted to perform an operation on an invalid topic." - case ErrMessageSetSizeTooLarge: - return "kafka server: The request included message batch larger than the configured segment size on the server." - case ErrNotEnoughReplicas: - return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." - case ErrNotEnoughReplicasAfterAppend: - return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." - case ErrInvalidRequiredAcks: - return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)." - case ErrIllegalGeneration: - return "kafka server: The provided generation id is not the current generation." - case ErrInconsistentGroupProtocol: - return "kafka server: The provider group protocol type is incompatible with the other members." - case ErrInvalidGroupId: - return "kafka server: The provided group id was empty." - case ErrUnknownMemberId: - return "kafka server: The provided member is not known in the current generation." - case ErrInvalidSessionTimeout: - return "kafka server: The provided session timeout is outside the allowed range." - case ErrRebalanceInProgress: - return "kafka server: A rebalance for the group is in progress. Please re-join the group." 
- case ErrInvalidCommitOffsetSize: - return "kafka server: The provided commit metadata was too large." - case ErrTopicAuthorizationFailed: - return "kafka server: The client is not authorized to access this topic." - case ErrGroupAuthorizationFailed: - return "kafka server: The client is not authorized to access this group." - case ErrClusterAuthorizationFailed: - return "kafka server: The client is not authorized to send this request type." - case ErrInvalidTimestamp: - return "kafka server: The timestamp of the message is out of acceptable range." - case ErrUnsupportedSASLMechanism: - return "kafka server: The broker does not support the requested SASL mechanism." - case ErrIllegalSASLState: - return "kafka server: Request is not valid given the current SASL state." - case ErrUnsupportedVersion: - return "kafka server: The version of API is not supported." - case ErrTopicAlreadyExists: - return "kafka server: Topic with this name already exists." - case ErrInvalidPartitions: - return "kafka server: Number of partitions is invalid." - case ErrInvalidReplicationFactor: - return "kafka server: Replication-factor is invalid." - case ErrInvalidReplicaAssignment: - return "kafka server: Replica assignment is invalid." - case ErrInvalidConfig: - return "kafka server: Configuration is invalid." - case ErrNotController: - return "kafka server: This is not the correct controller for this cluster." - case ErrInvalidRequest: - return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." - case ErrUnsupportedForMessageFormat: - return "kafka server: The requested operation is not supported by the message format version." - case ErrPolicyViolation: - return "kafka server: Request parameters do not satisfy the configured policy." - case ErrOutOfOrderSequenceNumber: - return "kafka server: The broker received an out of order sequence number." - case ErrDuplicateSequenceNumber: - return "kafka server: The broker received a duplicate sequence number." - case ErrInvalidProducerEpoch: - return "kafka server: Producer attempted an operation with an old epoch." - case ErrInvalidTxnState: - return "kafka server: The producer attempted a transactional operation in an invalid state." - case ErrInvalidProducerIDMapping: - return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." - case ErrInvalidTransactionTimeout: - return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." - case ErrConcurrentTransactions: - return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." - case ErrTransactionCoordinatorFenced: - return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." - case ErrTransactionalIDAuthorizationFailed: - return "kafka server: Transactional ID authorization failed." - case ErrSecurityDisabled: - return "kafka server: Security features are disabled." - case ErrOperationNotAttempted: - return "kafka server: The broker did not attempt to execute this operation." - case ErrKafkaStorageError: - return "kafka server: Disk error when trying to access log file on the disk." 
- case ErrLogDirNotFound: - return "kafka server: The specified log directory is not found in the broker config." - case ErrSASLAuthenticationFailed: - return "kafka server: SASL Authentication failed." - case ErrUnknownProducerID: - return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." - case ErrReassignmentInProgress: - return "kafka server: A partition reassignment is in progress." - } - - return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) -} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go deleted file mode 100644 index 8c8e3a5afc..0000000000 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ /dev/null @@ -1,170 +0,0 @@ -package sarama - -type fetchRequestBlock struct { - fetchOffset int64 - maxBytes int32 -} - -func (b *fetchRequestBlock) encode(pe packetEncoder) error { - pe.putInt64(b.fetchOffset) - pe.putInt32(b.maxBytes) - return nil -} - -func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { - if b.fetchOffset, err = pd.getInt64(); err != nil { - return err - } - if b.maxBytes, err = pd.getInt32(); err != nil { - return err - } - return nil -} - -// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See -// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at -// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes -type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - MaxBytes int32 - Version int16 - Isolation IsolationLevel - blocks map[string]map[int32]*fetchRequestBlock -} - -type IsolationLevel int8 - -const ( - ReadUncommitted IsolationLevel = 0 - ReadCommitted IsolationLevel = 1 -) - -func (r *FetchRequest) encode(pe packetEncoder) (err error) { - pe.putInt32(-1) // replica ID is always -1 for clients - pe.putInt32(r.MaxWaitTime) - pe.putInt32(r.MinBytes) - if r.Version >= 3 { - pe.putInt32(r.MaxBytes) - } - if r.Version >= 4 { - pe.putInt8(int8(r.Isolation)) - } - err = pe.putArrayLength(len(r.blocks)) - if err != nil { - return err - } - for topic, blocks := range r.blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(blocks)) - if err != nil { - return err - } - for partition, block := range blocks { - pe.putInt32(partition) - err = block.encode(pe) - if err != nil { - return err - } - } - } - return nil -} - -func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - if _, err = pd.getInt32(); err != nil { - return err - } - if r.MaxWaitTime, err = pd.getInt32(); err != nil { - return err - } - if r.MinBytes, err = pd.getInt32(); err != nil { - return err - } - if r.Version >= 3 { - if r.MaxBytes, err = pd.getInt32(); err != nil { - return err - } - } - if r.Version >= 4 { - isolation, err := pd.getInt8() - if err != nil { - return err - } - r.Isolation = IsolationLevel(isolation) - } - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - r.blocks = make(map[string]map[int32]*fetchRequestBlock) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.blocks[topic] = make(map[int32]*fetchRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := 
pd.getInt32() - if err != nil { - return err - } - fetchBlock := &fetchRequestBlock{} - if err = fetchBlock.decode(pd); err != nil { - return err - } - r.blocks[topic][partition] = fetchBlock - } - } - return nil -} - -func (r *FetchRequest) key() int16 { - return 1 -} - -func (r *FetchRequest) version() int16 { - return r.Version -} - -func (r *FetchRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_10_1_0 - case 4: - return V0_11_0_0 - default: - return minVersion - } -} - -func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { - if r.blocks == nil { - r.blocks = make(map[string]map[int32]*fetchRequestBlock) - } - - if r.blocks[topic] == nil { - r.blocks[topic] = make(map[int32]*fetchRequestBlock) - } - - tmp := new(fetchRequestBlock) - tmp.maxBytes = maxBytes - tmp.fetchOffset = fetchOffset - - r.blocks[topic][partitionID] = tmp -} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go deleted file mode 100644 index 3433bcfdb8..0000000000 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ /dev/null @@ -1,315 +0,0 @@ -package sarama - -import "time" - -type AbortedTransaction struct { - ProducerID int64 - FirstOffset int64 -} - -func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { - if t.ProducerID, err = pd.getInt64(); err != nil { - return err - } - - if t.FirstOffset, err = pd.getInt64(); err != nil { - return err - } - - return nil -} - -func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { - pe.putInt64(t.ProducerID) - pe.putInt64(t.FirstOffset) - - return nil -} - -type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - LastStableOffset int64 - AbortedTransactions []*AbortedTransaction - Records Records -} - -func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - b.Err = KError(tmp) - - b.HighWaterMarkOffset, err = pd.getInt64() - if err != nil { - return err - } - - if version >= 4 { - b.LastStableOffset, err = pd.getInt64() - if err != nil { - return err - } - - numTransact, err := pd.getArrayLength() - if err != nil { - return err - } - - if numTransact >= 0 { - b.AbortedTransactions = make([]*AbortedTransaction, numTransact) - } - - for i := 0; i < numTransact; i++ { - transact := new(AbortedTransaction) - if err = transact.decode(pd); err != nil { - return err - } - b.AbortedTransactions[i] = transact - } - } - - recordsSize, err := pd.getInt32() - if err != nil { - return err - } - - recordsDecoder, err := pd.getSubset(int(recordsSize)) - if err != nil { - return err - } - if recordsSize > 0 { - if err = b.Records.decode(recordsDecoder); err != nil { - return err - } - } - - return nil -} - -func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { - pe.putInt16(int16(b.Err)) - - pe.putInt64(b.HighWaterMarkOffset) - - if version >= 4 { - pe.putInt64(b.LastStableOffset) - - if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { - return err - } - for _, transact := range b.AbortedTransactions { - if err = transact.encode(pe); err != nil { - return err - } - } - } - - pe.push(&lengthField{}) - err = b.Records.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock - ThrottleTime time.Duration - Version 
int16 // v1 requires 0.9+, v2 requires 0.10+ -} - -func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - - if r.Version >= 1 { - throttle, err := pd.getInt32() - if err != nil { - return err - } - r.ThrottleTime = time.Duration(throttle) * time.Millisecond - } - - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(FetchResponseBlock) - err = block.decode(pd, version) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - return nil -} - -func (r *FetchResponse) encode(pe packetEncoder) (err error) { - if r.Version >= 1 { - pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) - } - - err = pe.putArrayLength(len(r.Blocks)) - if err != nil { - return err - } - - for topic, partitions := range r.Blocks { - err = pe.putString(topic) - if err != nil { - return err - } - - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - - for id, block := range partitions { - pe.putInt32(id) - err = block.encode(pe, r.Version) - if err != nil { - return err - } - } - - } - return nil -} - -func (r *FetchResponse) key() int16 { - return 1 -} - -func (r *FetchResponse) version() int16 { - return r.Version -} - -func (r *FetchResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_10_1_0 - case 4: - return V0_11_0_0 - default: - return minVersion - } -} - -func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -func (r *FetchResponse) AddError(topic string, partition int32, err KError) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*FetchResponseBlock) - } - partitions, ok := r.Blocks[topic] - if !ok { - partitions = make(map[int32]*FetchResponseBlock) - r.Blocks[topic] = partitions - } - frb, ok := partitions[partition] - if !ok { - frb = new(FetchResponseBlock) - partitions[partition] = frb - } - frb.Err = err -} - -func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*FetchResponseBlock) - } - partitions, ok := r.Blocks[topic] - if !ok { - partitions = make(map[int32]*FetchResponseBlock) - r.Blocks[topic] = partitions - } - frb, ok := partitions[partition] - if !ok { - frb = new(FetchResponseBlock) - partitions[partition] = frb - } - - return frb -} - -func encodeKV(key, value Encoder) ([]byte, []byte) { - var kb []byte - var vb []byte - if key != nil { - kb, _ = key.Encode() - } - if value != nil { - vb, _ = value.Encode() - } - - return kb, vb -} - -func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { - frb := r.getOrCreateBlock(topic, partition) - kb, vb := encodeKV(key, value) - msg := &Message{Key: kb, Value: vb} - msgBlock := &MessageBlock{Msg: msg, Offset: offset} - set := frb.Records.msgSet - if set == nil { - set = &MessageSet{} - frb.Records = 
newLegacyRecords(set) - } - set.Messages = append(set.Messages, msgBlock) -} - -func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { - frb := r.getOrCreateBlock(topic, partition) - kb, vb := encodeKV(key, value) - rec := &Record{Key: kb, Value: vb, OffsetDelta: offset} - batch := frb.Records.recordBatch - if batch == nil { - batch = &RecordBatch{Version: 2} - frb.Records = newDefaultRecords(batch) - } - batch.addRecord(rec) -} - -func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { - frb := r.getOrCreateBlock(topic, partition) - frb.LastStableOffset = offset -} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go deleted file mode 100644 index ce49c47397..0000000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ /dev/null @@ -1,47 +0,0 @@ -package sarama - -type HeartbeatRequest struct { - GroupId string - GenerationId int32 - MemberId string -} - -func (r *HeartbeatRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - - pe.putInt32(r.GenerationId) - - if err := pe.putString(r.MemberId); err != nil { - return err - } - - return nil -} - -func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return - } - - return nil -} - -func (r *HeartbeatRequest) key() int16 { - return 12 -} - -func (r *HeartbeatRequest) version() int16 { - return 0 -} - -func (r *HeartbeatRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go deleted file mode 100644 index 766f5fdec6..0000000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ /dev/null @@ -1,32 +0,0 @@ -package sarama - -type HeartbeatResponse struct { - Err KError -} - -func (r *HeartbeatResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return nil -} - -func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(kerr) - - return nil -} - -func (r *HeartbeatResponse) key() int16 { - return 12 -} - -func (r *HeartbeatResponse) version() int16 { - return 0 -} - -func (r *HeartbeatResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go deleted file mode 100644 index 3a7ba17122..0000000000 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ /dev/null @@ -1,143 +0,0 @@ -package sarama - -type GroupProtocol struct { - Name string - Metadata []byte -} - -func (p *GroupProtocol) decode(pd packetDecoder) (err error) { - p.Name, err = pd.getString() - if err != nil { - return err - } - p.Metadata, err = pd.getBytes() - return err -} - -func (p *GroupProtocol) encode(pe packetEncoder) (err error) { - if err := pe.putString(p.Name); err != nil { - return err - } - if err := pe.putBytes(p.Metadata); err != nil { - return err - } - return nil -} - -type JoinGroupRequest struct { - GroupId string - SessionTimeout int32 - MemberId string - ProtocolType string - GroupProtocols map[string][]byte // deprecated; use 
OrderedGroupProtocols - OrderedGroupProtocols []*GroupProtocol -} - -func (r *JoinGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - pe.putInt32(r.SessionTimeout) - if err := pe.putString(r.MemberId); err != nil { - return err - } - if err := pe.putString(r.ProtocolType); err != nil { - return err - } - - if len(r.GroupProtocols) > 0 { - if len(r.OrderedGroupProtocols) > 0 { - return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} - } - - if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { - return err - } - for name, metadata := range r.GroupProtocols { - if err := pe.putString(name); err != nil { - return err - } - if err := pe.putBytes(metadata); err != nil { - return err - } - } - } else { - if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { - return err - } - for _, protocol := range r.OrderedGroupProtocols { - if err := protocol.encode(pe); err != nil { - return err - } - } - } - - return nil -} - -func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - - if r.SessionTimeout, err = pd.getInt32(); err != nil { - return - } - - if r.MemberId, err = pd.getString(); err != nil { - return - } - - if r.ProtocolType, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.GroupProtocols = make(map[string][]byte) - for i := 0; i < n; i++ { - protocol := &GroupProtocol{} - if err := protocol.decode(pd); err != nil { - return err - } - r.GroupProtocols[protocol.Name] = protocol.Metadata - r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) - } - - return nil -} - -func (r *JoinGroupRequest) key() int16 { - return 11 -} - -func (r *JoinGroupRequest) version() int16 { - return 0 -} - -func (r *JoinGroupRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { - r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ - Name: name, - Metadata: metadata, - }) -} - -func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { - bin, err := encode(metadata, nil) - if err != nil { - return err - } - - r.AddGroupProtocol(name, bin) - return nil -} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go deleted file mode 100644 index 6d35fe3649..0000000000 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ /dev/null @@ -1,115 +0,0 @@ -package sarama - -type JoinGroupResponse struct { - Err KError - GenerationId int32 - GroupProtocol string - LeaderId string - MemberId string - Members map[string][]byte -} - -func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { - members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) - for id, bin := range r.Members { - meta := new(ConsumerGroupMemberMetadata) - if err := decode(bin, meta); err != nil { - return nil, err - } - members[id] = *meta - } - return members, nil -} - -func (r *JoinGroupResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - pe.putInt32(r.GenerationId) - - if err := pe.putString(r.GroupProtocol); err != nil { - return err - } - if err := pe.putString(r.LeaderId); err != nil { - return 
err - } - if err := pe.putString(r.MemberId); err != nil { - return err - } - - if err := pe.putArrayLength(len(r.Members)); err != nil { - return err - } - - for memberId, memberMetadata := range r.Members { - if err := pe.putString(memberId); err != nil { - return err - } - - if err := pe.putBytes(memberMetadata); err != nil { - return err - } - } - - return nil -} - -func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - r.Err = KError(kerr) - - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - - if r.GroupProtocol, err = pd.getString(); err != nil { - return - } - - if r.LeaderId, err = pd.getString(); err != nil { - return - } - - if r.MemberId, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.Members = make(map[string][]byte) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - - memberMetadata, err := pd.getBytes() - if err != nil { - return err - } - - r.Members[memberId] = memberMetadata - } - - return nil -} - -func (r *JoinGroupResponse) key() int16 { - return 11 -} - -func (r *JoinGroupResponse) version() int16 { - return 0 -} - -func (r *JoinGroupResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go deleted file mode 100644 index e177427482..0000000000 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ /dev/null @@ -1,40 +0,0 @@ -package sarama - -type LeaveGroupRequest struct { - GroupId string - MemberId string -} - -func (r *LeaveGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - if err := pe.putString(r.MemberId); err != nil { - return err - } - - return nil -} - -func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return - } - - return nil -} - -func (r *LeaveGroupRequest) key() int16 { - return 13 -} - -func (r *LeaveGroupRequest) version() int16 { - return 0 -} - -func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go deleted file mode 100644 index d60c626da0..0000000000 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ /dev/null @@ -1,32 +0,0 @@ -package sarama - -type LeaveGroupResponse struct { - Err KError -} - -func (r *LeaveGroupResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return nil -} - -func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { - kerr, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(kerr) - - return nil -} - -func (r *LeaveGroupResponse) key() int16 { - return 13 -} - -func (r *LeaveGroupResponse) version() int16 { - return 0 -} - -func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go deleted file mode 100644 index 576b1a6f6f..0000000000 --- a/vendor/github.com/Shopify/sarama/length_field.go +++ /dev/null @@ -1,69 +0,0 @@ -package sarama - 
-import "encoding/binary" - -// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. -type lengthField struct { - startOffset int -} - -func (l *lengthField) saveOffset(in int) { - l.startOffset = in -} - -func (l *lengthField) reserveLength() int { - return 4 -} - -func (l *lengthField) run(curOffset int, buf []byte) error { - binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) - return nil -} - -func (l *lengthField) check(curOffset int, buf []byte) error { - if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { - return PacketDecodingError{"length field invalid"} - } - - return nil -} - -type varintLengthField struct { - startOffset int - length int64 -} - -func (l *varintLengthField) decode(pd packetDecoder) error { - var err error - l.length, err = pd.getVarint() - return err -} - -func (l *varintLengthField) saveOffset(in int) { - l.startOffset = in -} - -func (l *varintLengthField) adjustLength(currOffset int) int { - oldFieldSize := l.reserveLength() - l.length = int64(currOffset - l.startOffset - oldFieldSize) - - return l.reserveLength() - oldFieldSize -} - -func (l *varintLengthField) reserveLength() int { - var tmp [binary.MaxVarintLen64]byte - return binary.PutVarint(tmp[:], l.length) -} - -func (l *varintLengthField) run(curOffset int, buf []byte) error { - binary.PutVarint(buf[l.startOffset:], l.length) - return nil -} - -func (l *varintLengthField) check(curOffset int, buf []byte) error { - if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { - return PacketDecodingError{"length field invalid"} - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go deleted file mode 100644 index 3b16abf7fa..0000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -type ListGroupsRequest struct { -} - -func (r *ListGroupsRequest) encode(pe packetEncoder) error { - return nil -} - -func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { - return nil -} - -func (r *ListGroupsRequest) key() int16 { - return 16 -} - -func (r *ListGroupsRequest) version() int16 { - return 0 -} - -func (r *ListGroupsRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go deleted file mode 100644 index 56115d4c75..0000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ /dev/null @@ -1,69 +0,0 @@ -package sarama - -type ListGroupsResponse struct { - Err KError - Groups map[string]string -} - -func (r *ListGroupsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - - if err := pe.putArrayLength(len(r.Groups)); err != nil { - return err - } - for groupId, protocolType := range r.Groups { - if err := pe.putString(groupId); err != nil { - return err - } - if err := pe.putString(protocolType); err != nil { - return err - } - } - - return nil -} - -func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - r.Err = KError(kerr) - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.Groups = make(map[string]string) - for i := 0; i < n; i++ { - groupId, err := pd.getString() - if err != nil { - return err - } - 
protocolType, err := pd.getString() - if err != nil { - return err - } - - r.Groups[groupId] = protocolType - } - - return nil -} - -func (r *ListGroupsResponse) key() int16 { - return 16 -} - -func (r *ListGroupsResponse) version() int16 { - return 0 -} - -func (r *ListGroupsResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go deleted file mode 100644 index bd5650bbc0..0000000000 --- a/vendor/github.com/Shopify/sarama/message.go +++ /dev/null @@ -1,200 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "time" - - "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" -) - -// CompressionCodec represents the various compression codecs recognized by Kafka in messages. -type CompressionCodec int8 - -// only the last two bits are really used -const compressionCodecMask int8 = 0x03 - -const ( - CompressionNone CompressionCodec = 0 - CompressionGZIP CompressionCodec = 1 - CompressionSnappy CompressionCodec = 2 - CompressionLZ4 CompressionCodec = 3 -) - -type Message struct { - Codec CompressionCodec // codec used to compress the message contents - Key []byte // the message key, may be nil - Value []byte // the message contents - Set *MessageSet // the message set a message might wrap - Version int8 // v1 requires Kafka 0.10 - Timestamp time.Time // the timestamp of the message (version 1+ only) - - compressedCache []byte - compressedSize int // used for computing the compression ratio metrics -} - -func (m *Message) encode(pe packetEncoder) error { - pe.push(newCRC32Field(crcIEEE)) - - pe.putInt8(m.Version) - - attributes := int8(m.Codec) & compressionCodecMask - pe.putInt8(attributes) - - if m.Version >= 1 { - if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { - return err - } - } - - err := pe.putBytes(m.Key) - if err != nil { - return err - } - - var payload []byte - - if m.compressedCache != nil { - payload = m.compressedCache - m.compressedCache = nil - } else if m.Value != nil { - switch m.Codec { - case CompressionNone: - payload = m.Value - case CompressionGZIP: - var buf bytes.Buffer - writer := gzip.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - case CompressionSnappy: - tmp := snappy.Encode(m.Value) - m.compressedCache = tmp - payload = m.compressedCache - case CompressionLZ4: - var buf bytes.Buffer - writer := lz4.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} - } - // Keep in mind the compressed payload size for metric gathering - m.compressedSize = len(payload) - } - - if err = pe.putBytes(payload); err != nil { - return err - } - - return pe.pop() -} - -func (m *Message) decode(pd packetDecoder) (err error) { - err = pd.push(newCRC32Field(crcIEEE)) - if err != nil { - return err - } - - m.Version, err = pd.getInt8() - if err != nil { - return err - } - - if m.Version > 1 { - return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} - } - - attribute, err := pd.getInt8() - if err != nil { - return err - } - m.Codec = CompressionCodec(attribute & compressionCodecMask) - - if m.Version 
== 1 { - if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { - return err - } - } - - m.Key, err = pd.getBytes() - if err != nil { - return err - } - - m.Value, err = pd.getBytes() - if err != nil { - return err - } - - // Required for deep equal assertion during tests but might be useful - // for future metrics about the compression ratio in fetch requests - m.compressedSize = len(m.Value) - - switch m.Codec { - case CompressionNone: - // nothing to do - case CompressionGZIP: - if m.Value == nil { - break - } - reader, err := gzip.NewReader(bytes.NewReader(m.Value)) - if err != nil { - return err - } - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionSnappy: - if m.Value == nil { - break - } - if m.Value, err = snappy.Decode(m.Value); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionLZ4: - if m.Value == nil { - break - } - reader := lz4.NewReader(bytes.NewReader(m.Value)) - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} - } - - return pd.pop() -} - -// decodes a message set from a previousy encoded bulk-message -func (m *Message) decodeSet() (err error) { - pd := realDecoder{raw: m.Value} - m.Set = &MessageSet{} - return m.Set.decode(&pd) -} diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go deleted file mode 100644 index f028784e51..0000000000 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ /dev/null @@ -1,89 +0,0 @@ -package sarama - -type MessageBlock struct { - Offset int64 - Msg *Message -} - -// Messages convenience helper which returns either all the -// messages that are wrapped in this block -func (msb *MessageBlock) Messages() []*MessageBlock { - if msb.Msg.Set != nil { - return msb.Msg.Set.Messages - } - return []*MessageBlock{msb} -} - -func (msb *MessageBlock) encode(pe packetEncoder) error { - pe.putInt64(msb.Offset) - pe.push(&lengthField{}) - err := msb.Msg.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -func (msb *MessageBlock) decode(pd packetDecoder) (err error) { - if msb.Offset, err = pd.getInt64(); err != nil { - return err - } - - if err = pd.push(&lengthField{}); err != nil { - return err - } - - msb.Msg = new(Message) - if err = msb.Msg.decode(pd); err != nil { - return err - } - - if err = pd.pop(); err != nil { - return err - } - - return nil -} - -type MessageSet struct { - PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock - Messages []*MessageBlock -} - -func (ms *MessageSet) encode(pe packetEncoder) error { - for i := range ms.Messages { - err := ms.Messages[i].encode(pe) - if err != nil { - return err - } - } - return nil -} - -func (ms *MessageSet) decode(pd packetDecoder) (err error) { - ms.Messages = nil - - for pd.remaining() > 0 { - msb := new(MessageBlock) - err = msb.decode(pd) - switch err { - case nil: - ms.Messages = append(ms.Messages, msb) - case ErrInsufficientData: - // As an optimization the server is allowed to return a partial message at the - // end of the message set. Clients should handle this case. So we just ignore such things. 
- ms.PartialTrailingMessage = true - return nil - default: - return err - } - } - - return nil -} - -func (ms *MessageSet) addMessage(msg *Message) { - block := new(MessageBlock) - block.Msg = msg - ms.Messages = append(ms.Messages, block) -} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go deleted file mode 100644 index 9a26b55fd0..0000000000 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ /dev/null @@ -1,52 +0,0 @@ -package sarama - -type MetadataRequest struct { - Topics []string -} - -func (r *MetadataRequest) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(r.Topics)) - if err != nil { - return err - } - - for i := range r.Topics { - err = pe.putString(r.Topics[i]) - if err != nil { - return err - } - } - return nil -} - -func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - - r.Topics = make([]string, topicCount) - for i := range r.Topics { - topic, err := pd.getString() - if err != nil { - return err - } - r.Topics[i] = topic - } - return nil -} - -func (r *MetadataRequest) key() int16 { - return 3 -} - -func (r *MetadataRequest) version() int16 { - return 0 -} - -func (r *MetadataRequest) requiredVersion() KafkaVersion { - return minVersion -} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go deleted file mode 100644 index f9d6a4271e..0000000000 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ /dev/null @@ -1,239 +0,0 @@ -package sarama - -type PartitionMetadata struct { - Err KError - ID int32 - Leader int32 - Replicas []int32 - Isr []int32 -} - -func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - pm.Err = KError(tmp) - - pm.ID, err = pd.getInt32() - if err != nil { - return err - } - - pm.Leader, err = pd.getInt32() - if err != nil { - return err - } - - pm.Replicas, err = pd.getInt32Array() - if err != nil { - return err - } - - pm.Isr, err = pd.getInt32Array() - if err != nil { - return err - } - - return nil -} - -func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(pm.Err)) - pe.putInt32(pm.ID) - pe.putInt32(pm.Leader) - - err = pe.putInt32Array(pm.Replicas) - if err != nil { - return err - } - - err = pe.putInt32Array(pm.Isr) - if err != nil { - return err - } - - return nil -} - -type TopicMetadata struct { - Err KError - Name string - Partitions []*PartitionMetadata -} - -func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - tm.Err = KError(tmp) - - tm.Name, err = pd.getString() - if err != nil { - return err - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - tm.Partitions = make([]*PartitionMetadata, n) - for i := 0; i < n; i++ { - tm.Partitions[i] = new(PartitionMetadata) - err = tm.Partitions[i].decode(pd) - if err != nil { - return err - } - } - - return nil -} - -func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(tm.Err)) - - err = pe.putString(tm.Name) - if err != nil { - return err - } - - err = pe.putArrayLength(len(tm.Partitions)) - if err != nil { - return err - } - - for _, pm := range tm.Partitions { - err = pm.encode(pe) - if err != nil { - return err - } - } - - return nil -} - -type 
MetadataResponse struct { - Brokers []*Broker - Topics []*TopicMetadata -} - -func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Brokers = make([]*Broker, n) - for i := 0; i < n; i++ { - r.Brokers[i] = new(Broker) - err = r.Brokers[i].decode(pd) - if err != nil { - return err - } - } - - n, err = pd.getArrayLength() - if err != nil { - return err - } - - r.Topics = make([]*TopicMetadata, n) - for i := 0; i < n; i++ { - r.Topics[i] = new(TopicMetadata) - err = r.Topics[i].decode(pd) - if err != nil { - return err - } - } - - return nil -} - -func (r *MetadataResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(r.Brokers)) - if err != nil { - return err - } - for _, broker := range r.Brokers { - err = broker.encode(pe) - if err != nil { - return err - } - } - - err = pe.putArrayLength(len(r.Topics)) - if err != nil { - return err - } - for _, tm := range r.Topics { - err = tm.encode(pe) - if err != nil { - return err - } - } - - return nil -} - -func (r *MetadataResponse) key() int16 { - return 3 -} - -func (r *MetadataResponse) version() int16 { - return 0 -} - -func (r *MetadataResponse) requiredVersion() KafkaVersion { - return minVersion -} - -// testing API - -func (r *MetadataResponse) AddBroker(addr string, id int32) { - r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) -} - -func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { - var tmatch *TopicMetadata - - for _, tm := range r.Topics { - if tm.Name == topic { - tmatch = tm - goto foundTopic - } - } - - tmatch = new(TopicMetadata) - tmatch.Name = topic - r.Topics = append(r.Topics, tmatch) - -foundTopic: - - tmatch.Err = err - return tmatch -} - -func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { - tmatch := r.AddTopic(topic, ErrNoError) - var pmatch *PartitionMetadata - - for _, pm := range tmatch.Partitions { - if pm.ID == partition { - pmatch = pm - goto foundPartition - } - } - - pmatch = new(PartitionMetadata) - pmatch.ID = partition - tmatch.Partitions = append(tmatch.Partitions, pmatch) - -foundPartition: - - pmatch.Leader = brokerID - pmatch.Replicas = replicas - pmatch.Isr = isr - pmatch.Err = err - -} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go deleted file mode 100644 index 4869708e94..0000000000 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ /dev/null @@ -1,51 +0,0 @@ -package sarama - -import ( - "fmt" - "strings" - - "github.com/rcrowley/go-metrics" -) - -// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: -// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, -// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. 
-// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
-const (
-	metricsReservoirSize = 1028
-	metricsAlphaFactor   = 0.015
-)
-
-func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
-	return r.GetOrRegister(name, func() metrics.Histogram {
-		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
-	}).(metrics.Histogram)
-}
-
-func getMetricNameForBroker(name string, broker *Broker) string {
-	// Use broker id like the Java client as it does not contain '.' or ':' characters that
-	// can be interpreted as a special character by a monitoring tool (e.g. Graphite)
-	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
-}
-
-func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
-	return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
-}
-
-func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
-	return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
-}
-
-func getMetricNameForTopic(name string, topic string) string {
-	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
-	// cf. KAFKA-1902 and KAFKA-2337
-	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
-}
-
-func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
-	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
-}
-
-func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
-	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
-}
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
deleted file mode 100644
index 0734d34f6c..0000000000
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ /dev/null
@@ -1,324 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"net"
-	"reflect"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-const (
-	expectationTimeout = 500 * time.Millisecond
-)
-
-type requestHandlerFunc func(req *request) (res encoder)
-
-// RequestNotifierFunc is invoked when a mock broker processes a request successfully
-// and provides the number of bytes read and written.
-type RequestNotifierFunc func(bytesRead, bytesWritten int)
-
-// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
-// to facilitate testing of higher level or specialized consumers and producers
-// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
-// but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshaling, response marshaling, and makes it the test
-// writer's responsibility to program MockBroker behaviour that is correct
-// according to the Kafka API protocol.
-//
-// MockBroker is implemented as a TCP server listening on a kernel-selected
-// localhost port that can accept many connections. It reads Kafka requests
-// from that connection and returns responses programmed by the SetHandlerByMap
-// function. If a MockBroker receives a request that it has no programmed
-// response for, then it returns nothing and the request times out.
-//
-// A set of MockRequest builders to define mappings used by MockBroker is
-// provided by Sarama. But users can develop MockRequests of their own and use
-// them along with or instead of the standard ones.
-//
-// When running tests with MockBroker it is strongly recommended to specify
-// a timeout to `go test` so that if the broker hangs waiting for a response,
-// the test panics.
-//
-// It is not necessary to prefix message length or correlation ID to your
-// response bytes; the server does that automatically as a convenience.
-type MockBroker struct {
-	brokerID     int32
-	port         int32
-	closing      chan none
-	stopper      chan none
-	expectations chan encoder
-	listener     net.Listener
-	t            TestReporter
-	latency      time.Duration
-	handler      requestHandlerFunc
-	notifier     RequestNotifierFunc
-	history      []RequestResponse
-	lock         sync.Mutex
-}
-
-// RequestResponse represents a Request/Response pair processed by MockBroker.
-type RequestResponse struct {
-	Request  protocolBody
-	Response encoder
-}
-
-// SetLatency makes the broker pause for the specified period every time before
-// replying.
-func (b *MockBroker) SetLatency(latency time.Duration) {
-	b.latency = latency
-}
-
-// SetHandlerByMap defines the mapping of Request types to MockResponses. When a
-// request is received by the broker, it looks up the request type in the map
-// and uses the found MockResponse instance to generate an appropriate reply.
-// If the request type is not found in the map then nothing is sent.
-func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
-	b.setHandler(func(req *request) (res encoder) {
-		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
-		mockResponse := handlerMap[reqTypeName]
-		if mockResponse == nil {
-			return nil
-		}
-		return mockResponse.For(req.body)
-	})
-}
-
-// SetNotifier sets a function that will get invoked whenever a request has been
-// processed successfully and will provide the number of bytes read and written.
-func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
-	b.lock.Lock()
-	b.notifier = notifier
-	b.lock.Unlock()
-}
-
-// BrokerID returns the broker ID assigned to the broker.
-func (b *MockBroker) BrokerID() int32 {
-	return b.brokerID
-}
-
-// History returns a slice of RequestResponse pairs in the order they were
-// processed by the broker. Note that in case of multiple connections to the
-// broker the order expected by a test can be different from the order recorded
-// in the history, unless some synchronization is implemented in the test.
-func (b *MockBroker) History() []RequestResponse {
-	b.lock.Lock()
-	history := make([]RequestResponse, len(b.history))
-	copy(history, b.history)
-	b.lock.Unlock()
-	return history
-}
-
-// Port returns the TCP port number the broker is listening for requests on.
-func (b *MockBroker) Port() int32 {
-	return b.port
-}
-
-// Addr returns the broker connection string in the form "<address>:<port>".
-func (b *MockBroker) Addr() string {
-	return b.listener.Addr().String()
-}
-
-// Close terminates the broker, blocking until it stops internal goroutines and
-// releases all resources.
-func (b *MockBroker) Close() {
-	close(b.expectations)
-	if len(b.expectations) > 0 {
-		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
-		for e := range b.expectations {
-			_, _ = buf.WriteString(spew.Sdump(e))
-		}
-		b.t.Error(buf.String())
-	}
-	close(b.closing)
-	<-b.stopper
-}
-
-// setHandler sets the specified function as the request handler. Whenever
-// a mock broker reads a request from the wire it passes the request to the
-// function and sends back whatever the handler function returns.
-func (b *MockBroker) setHandler(handler requestHandlerFunc) {
-	b.lock.Lock()
-	b.handler = handler
-	b.lock.Unlock()
-}
-
-func (b *MockBroker) serverLoop() {
-	defer close(b.stopper)
-	var err error
-	var conn net.Conn
-
-	go func() {
-		<-b.closing
-		err := b.listener.Close()
-		if err != nil {
-			b.t.Error(err)
-		}
-	}()
-
-	wg := &sync.WaitGroup{}
-	i := 0
-	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
-		wg.Add(1)
-		go b.handleRequests(conn, i, wg)
-		i++
-	}
-	wg.Wait()
-	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
-}
-
-func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
-	defer wg.Done()
-	defer func() {
-		_ = conn.Close()
-	}()
-	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
-	var err error
-
-	abort := make(chan none)
-	defer close(abort)
-	go func() {
-		select {
-		case <-b.closing:
-			_ = conn.Close()
-		case <-abort:
-		}
-	}()
-
-	resHeader := make([]byte, 8)
-	for {
-		req, bytesRead, err := decodeRequest(conn)
-		if err != nil {
-			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
-			b.serverError(err)
-			break
-		}
-
-		if b.latency > 0 {
-			time.Sleep(b.latency)
-		}
-
-		b.lock.Lock()
-		res := b.handler(req)
-		b.history = append(b.history, RequestResponse{req.body, res})
-		b.lock.Unlock()
-
-		if res == nil {
-			Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
-			continue
-		}
-		Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
-
-		encodedRes, err := encode(res, nil)
-		if err != nil {
-			b.serverError(err)
-			break
-		}
-		if len(encodedRes) == 0 {
-			b.lock.Lock()
-			if b.notifier != nil {
-				b.notifier(bytesRead, 0)
-			}
-			b.lock.Unlock()
-			continue
-		}
-
-		binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
-		binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
-		if _, err = conn.Write(resHeader); err != nil {
-			b.serverError(err)
-			break
-		}
-		if _, err = conn.Write(encodedRes); err != nil {
-			b.serverError(err)
-			break
-		}
-
-		b.lock.Lock()
-		if b.notifier != nil {
-			b.notifier(bytesRead, len(resHeader)+len(encodedRes))
-		}
-		b.lock.Unlock()
-	}
-	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
-}
-
-func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
-	select {
-	case res, ok := <-b.expectations:
-		if !ok {
-			return nil
-		}
-		return res
-	case <-time.After(expectationTimeout):
-		return nil
-	}
-}
-
-func (b *MockBroker) serverError(err error) {
-	isConnectionClosedError := false
-	if _, ok := err.(*net.OpError); ok {
-		isConnectionClosedError = true
-	} else if err == io.EOF {
-		isConnectionClosedError = true
-	} else if err.Error() == "use of closed network connection" {
-		isConnectionClosedError = true
-	}
-
-	if isConnectionClosedError {
-		return
-	}
-
-	b.t.Errorf(err.Error())
-}
-
-// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
-// test framework and a channel of responses to use. If an error occurs it is
-// simply logged to the TestReporter and the broker exits.
-func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
-	return NewMockBrokerAddr(t, brokerID, "localhost:0")
-}
-
-// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
-// it rather than just some ephemeral port.
-func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
-	var err error
-
-	broker := &MockBroker{
-		closing:      make(chan none),
-		stopper:      make(chan none),
-		t:            t,
-		brokerID:     brokerID,
-		expectations: make(chan encoder, 512),
-	}
-	broker.handler = broker.defaultRequestHandler
-
-	broker.listener, err = net.Listen("tcp", addr)
-	if err != nil {
-		t.Fatal(err)
-	}
-	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
-	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmp, err := strconv.ParseInt(portStr, 10, 32)
-	if err != nil {
-		t.Fatal(err)
-	}
-	broker.port = int32(tmp)
-
-	go broker.serverLoop()
-
-	return broker
-}
-
-func (b *MockBroker) Returns(e encoder) {
-	b.expectations <- e
-}
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
deleted file mode 100644
index 9659757b7c..0000000000
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ /dev/null
@@ -1,469 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-)
-
-// TestReporter has methods matching go's testing.T to avoid importing
-// `testing` in the main part of the library.
-type TestReporter interface {
-	Error(...interface{})
-	Errorf(string, ...interface{})
-	Fatal(...interface{})
-	Fatalf(string, ...interface{})
-}
-
-// MockResponse is a response builder interface; it defines one method that
-// allows generating a response based on a request body. MockResponses are used
-// to program the behavior of MockBroker in tests.
-type MockResponse interface {
-	For(reqBody versionedDecoder) (res encoder)
-}
-
-// MockWrapper is a mock response builder that returns a particular concrete
-// response regardless of the actual request passed to the `For` method.
-type MockWrapper struct {
-	res encoder
-}
-
-func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
-	return mw.res
-}
-
-func NewMockWrapper(res encoder) *MockWrapper {
-	return &MockWrapper{res: res}
-}
-
-// MockSequence is a mock response builder that is created from a sequence of
-// concrete responses. Every time a `MockBroker` calls its `For` method,
-// the next response from the sequence is returned. When the end of the
-// sequence is reached the last element from the sequence is returned.
-type MockSequence struct { - responses []MockResponse -} - -func NewMockSequence(responses ...interface{}) *MockSequence { - ms := &MockSequence{} - ms.responses = make([]MockResponse, len(responses)) - for i, res := range responses { - switch res := res.(type) { - case MockResponse: - ms.responses[i] = res - case encoder: - ms.responses[i] = NewMockWrapper(res) - default: - panic(fmt.Sprintf("Unexpected response type: %T", res)) - } - } - return ms -} - -func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { - res = mc.responses[0].For(reqBody) - if len(mc.responses) > 1 { - mc.responses = mc.responses[1:] - } - return res -} - -// MockMetadataResponse is a `MetadataResponse` builder. -type MockMetadataResponse struct { - leaders map[string]map[int32]int32 - brokers map[string]int32 - t TestReporter -} - -func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { - return &MockMetadataResponse{ - leaders: make(map[string]map[int32]int32), - brokers: make(map[string]int32), - t: t, - } -} - -func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { - partitions := mmr.leaders[topic] - if partitions == nil { - partitions = make(map[int32]int32) - mmr.leaders[topic] = partitions - } - partitions[partition] = brokerID - return mmr -} - -func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { - mmr.brokers[addr] = brokerID - return mmr -} - -func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { - metadataRequest := reqBody.(*MetadataRequest) - metadataResponse := &MetadataResponse{} - for addr, brokerID := range mmr.brokers { - metadataResponse.AddBroker(addr, brokerID) - } - if len(metadataRequest.Topics) == 0 { - for topic, partitions := range mmr.leaders { - for partition, brokerID := range partitions { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) - } - } - return metadataResponse - } - for _, topic := range metadataRequest.Topics { - for partition, brokerID := range mmr.leaders[topic] { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) - } - } - return metadataResponse -} - -// MockOffsetResponse is an `OffsetResponse` builder. 
-type MockOffsetResponse struct { - offsets map[string]map[int32]map[int64]int64 - t TestReporter - version int16 -} - -func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { - return &MockOffsetResponse{ - offsets: make(map[string]map[int32]map[int64]int64), - t: t, - } -} - -func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { - mor.version = version - return mor -} - -func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { - partitions := mor.offsets[topic] - if partitions == nil { - partitions = make(map[int32]map[int64]int64) - mor.offsets[topic] = partitions - } - times := partitions[partition] - if times == nil { - times = make(map[int64]int64) - partitions[partition] = times - } - times[time] = offset - return mor -} - -func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { - offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{Version: mor.version} - for topic, partitions := range offsetRequest.blocks { - for partition, block := range partitions { - offset := mor.getOffset(topic, partition, block.time) - offsetResponse.AddTopicPartition(topic, partition, offset) - } - } - return offsetResponse -} - -func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { - partitions := mor.offsets[topic] - if partitions == nil { - mor.t.Errorf("missing topic: %s", topic) - } - times := partitions[partition] - if times == nil { - mor.t.Errorf("missing partition: %d", partition) - } - offset, ok := times[time] - if !ok { - mor.t.Errorf("missing time: %d", time) - } - return offset -} - -// MockFetchResponse is a `FetchResponse` builder. -type MockFetchResponse struct { - messages map[string]map[int32]map[int64]Encoder - highWaterMarks map[string]map[int32]int64 - t TestReporter - batchSize int - version int16 -} - -func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { - return &MockFetchResponse{ - messages: make(map[string]map[int32]map[int64]Encoder), - highWaterMarks: make(map[string]map[int32]int64), - t: t, - batchSize: batchSize, - } -} - -func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { - mfr.version = version - return mfr -} - -func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { - partitions := mfr.messages[topic] - if partitions == nil { - partitions = make(map[int32]map[int64]Encoder) - mfr.messages[topic] = partitions - } - messages := partitions[partition] - if messages == nil { - messages = make(map[int64]Encoder) - partitions[partition] = messages - } - messages[offset] = msg - return mfr -} - -func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { - partitions := mfr.highWaterMarks[topic] - if partitions == nil { - partitions = make(map[int32]int64) - mfr.highWaterMarks[topic] = partitions - } - partitions[partition] = offset - return mfr -} - -func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { - fetchRequest := reqBody.(*FetchRequest) - res := &FetchResponse{ - Version: mfr.version, - } - for topic, partitions := range fetchRequest.blocks { - for partition, block := range partitions { - initialOffset := block.fetchOffset - offset := initialOffset - maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) - for i := 0; i < mfr.batchSize && offset < maxOffset; { - msg := mfr.getMessage(topic, partition, 
offset) - if msg != nil { - res.AddMessage(topic, partition, nil, msg, offset) - i++ - } - offset++ - } - fb := res.GetBlock(topic, partition) - if fb == nil { - res.AddError(topic, partition, ErrNoError) - fb = res.GetBlock(topic, partition) - } - fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) - } - } - return res -} - -func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { - partitions := mfr.messages[topic] - if partitions == nil { - return nil - } - messages := partitions[partition] - if messages == nil { - return nil - } - return messages[offset] -} - -func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { - partitions := mfr.messages[topic] - if partitions == nil { - return 0 - } - messages := partitions[partition] - if messages == nil { - return 0 - } - return len(messages) -} - -func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { - partitions := mfr.highWaterMarks[topic] - if partitions == nil { - return 0 - } - return partitions[partition] -} - -// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. -type MockConsumerMetadataResponse struct { - coordinators map[string]interface{} - t TestReporter -} - -func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { - return &MockConsumerMetadataResponse{ - coordinators: make(map[string]interface{}), - t: t, - } -} - -func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { - mr.coordinators[group] = broker - return mr -} - -func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { - mr.coordinators[group] = kerror - return mr -} - -func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { - req := reqBody.(*ConsumerMetadataRequest) - group := req.ConsumerGroup - res := &ConsumerMetadataResponse{} - v := mr.coordinators[group] - switch v := v.(type) { - case *MockBroker: - res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} - case KError: - res.Err = v - } - return res -} - -// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. 
-type MockOffsetCommitResponse struct { - errors map[string]map[string]map[int32]KError - t TestReporter -} - -func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { - return &MockOffsetCommitResponse{t: t} -} - -func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { - if mr.errors == nil { - mr.errors = make(map[string]map[string]map[int32]KError) - } - topics := mr.errors[group] - if topics == nil { - topics = make(map[string]map[int32]KError) - mr.errors[group] = topics - } - partitions := topics[topic] - if partitions == nil { - partitions = make(map[int32]KError) - topics[topic] = partitions - } - partitions[partition] = kerror - return mr -} - -func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { - req := reqBody.(*OffsetCommitRequest) - group := req.ConsumerGroup - res := &OffsetCommitResponse{} - for topic, partitions := range req.blocks { - for partition := range partitions { - res.AddError(topic, partition, mr.getError(group, topic, partition)) - } - } - return res -} - -func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { - topics := mr.errors[group] - if topics == nil { - return ErrNoError - } - partitions := topics[topic] - if partitions == nil { - return ErrNoError - } - kerror, ok := partitions[partition] - if !ok { - return ErrNoError - } - return kerror -} - -// MockProduceResponse is a `ProduceResponse` builder. -type MockProduceResponse struct { - errors map[string]map[int32]KError - t TestReporter -} - -func NewMockProduceResponse(t TestReporter) *MockProduceResponse { - return &MockProduceResponse{t: t} -} - -func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { - if mr.errors == nil { - mr.errors = make(map[string]map[int32]KError) - } - partitions := mr.errors[topic] - if partitions == nil { - partitions = make(map[int32]KError) - mr.errors[topic] = partitions - } - partitions[partition] = kerror - return mr -} - -func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { - req := reqBody.(*ProduceRequest) - res := &ProduceResponse{} - for topic, partitions := range req.records { - for partition := range partitions { - res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) - } - } - return res -} - -func (mr *MockProduceResponse) getError(topic string, partition int32) KError { - partitions := mr.errors[topic] - if partitions == nil { - return ErrNoError - } - kerror, ok := partitions[partition] - if !ok { - return ErrNoError - } - return kerror -} - -// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. 
-type MockOffsetFetchResponse struct {
-	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
-	t       TestReporter
-}
-
-func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
-	return &MockOffsetFetchResponse{t: t}
-}
-
-func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
-	if mr.offsets == nil {
-		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
-	}
-	topics := mr.offsets[group]
-	if topics == nil {
-		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
-		mr.offsets[group] = topics
-	}
-	partitions := topics[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*OffsetFetchResponseBlock)
-		topics[topic] = partitions
-	}
-	partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
-	return mr
-}
-
-func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
-	req := reqBody.(*OffsetFetchRequest)
-	group := req.ConsumerGroup
-	res := &OffsetFetchResponse{}
-	for topic, partitions := range mr.offsets[group] {
-		for partition, block := range partitions {
-			res.AddBlock(topic, partition, block)
-		}
-	}
-	return res
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/README.md b/vendor/github.com/Shopify/sarama/mocks/README.md
deleted file mode 100644
index 55a6c2e61c..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# sarama/mocks
-
-The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types.
-You can use them to test your sarama applications using dependency injection.
-
-The following mock objects are available:
-
-- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
-- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
-- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
-
-The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
-and the results will be reported to the `*testing.T` object you provided when creating the mock.
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
deleted file mode 100644
index 24ae5c0d58..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/async_producer.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package mocks
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// AsyncProducer implements sarama's Producer interface for testing purposes.
-// Before you can send messages to its Input channel, you have to set expectations
-// so it knows how to handle the input; it returns an error if the number of messages
-// received is bigger than the number of expectations set. You can also set a
-// function in each expectation so that the message value is checked by this function
-// and an error is returned if the match fails.
-type AsyncProducer struct {
-	l            sync.Mutex
-	t            ErrorReporter
-	expectations []*producerExpectation
-	closed       chan struct{}
-	input        chan *sarama.ProducerMessage
-	successes    chan *sarama.ProducerMessage
-	errors       chan *sarama.ProducerError
-	lastOffset   int64
-}
-
-// NewAsyncProducer instantiates a new Producer mock. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument is used to determine whether it
-// should ack successes on the Successes channel.
-func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
-	if config == nil {
-		config = sarama.NewConfig()
-	}
-	mp := &AsyncProducer{
-		t:            t,
-		closed:       make(chan struct{}, 0),
-		expectations: make([]*producerExpectation, 0),
-		input:        make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
-		successes:    make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
-		errors:       make(chan *sarama.ProducerError, config.ChannelBufferSize),
-	}
-
-	go func() {
-		defer func() {
-			close(mp.successes)
-			close(mp.errors)
-		}()
-
-		for msg := range mp.input {
-			mp.l.Lock()
-			if mp.expectations == nil || len(mp.expectations) == 0 {
-				mp.expectations = nil
-				mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
-			} else {
-				expectation := mp.expectations[0]
-				mp.expectations = mp.expectations[1:]
-				if expectation.CheckFunction != nil {
-					if val, err := msg.Value.Encode(); err != nil {
-						mp.t.Errorf("Input message encoding failed: %s", err.Error())
-						mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
-					} else {
-						err = expectation.CheckFunction(val)
-						if err != nil {
-							mp.t.Errorf("Check function returned an error: %s", err.Error())
-							mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
-						}
-					}
-				}
-				if expectation.Result == errProduceSuccess {
-					mp.lastOffset++
-					if config.Producer.Return.Successes {
-						msg.Offset = mp.lastOffset
-						mp.successes <- msg
-					}
-				} else {
-					if config.Producer.Return.Errors {
-						mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
-					}
-				}
-			}
-			mp.l.Unlock()
-		}
-
-		mp.l.Lock()
-		if len(mp.expectations) > 0 {
-			mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
-		}
-		mp.l.Unlock()
-
-		close(mp.closed)
-	}()
-
-	return mp
-}
-
-////////////////////////////////////////////////
-// Implement Producer interface
-////////////////////////////////////////////////
-
-// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
-// By closing a mock producer, you also tell it that no more input will be provided, so it will
-// write an error to the test state if there are any remaining expectations.
-func (mp *AsyncProducer) AsyncClose() {
-	close(mp.input)
-}
-
-// Close corresponds with the Close method of sarama's Producer implementation.
-// By closing a mock producer, you also tell it that no more input will be provided, so it will
-// write an error to the test state if there are any remaining expectations.
-func (mp *AsyncProducer) Close() error {
-	mp.AsyncClose()
-	<-mp.closed
-	return nil
-}
-
-// Input corresponds with the Input method of sarama's Producer implementation.
-// You have to set expectations on the mock producer before writing messages to the Input
-// channel, so it knows how to handle them. If there are no remaining expectations and
-// a message is written to the Input channel, the mock producer will write an error to the test
-// state object.
-func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
-	return mp.input
-}
-
-// Successes corresponds with the Successes method of sarama's Producer implementation.
-func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
-	return mp.successes
-}
-
-// Errors corresponds with the Errors method of sarama's Producer implementation.
-func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { - return mp.errors -} - -//////////////////////////////////////////////// -// Setting expectations -//////////////////////////////////////////////// - -// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message -// will be provided on the input channel. The mock producer will call the given function to check -// the message value. If an error is returned it will be made available on the Errors channel -// otherwise the mock will handle the message as if it produced successfully, i.e. it will make -// it available on the Successes channel if the Producer.Return.Successes setting is set to true. -func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) { - mp.l.Lock() - defer mp.l.Unlock() - mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) -} - -// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message -// will be provided on the input channel. The mock producer will first call the given function to -// check the message value. If an error is returned it will be made available on the Errors channel -// otherwise the mock will handle the message as if it failed to produce successfully. This means -// it will make a ProducerError available on the Errors channel. -func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) { - mp.l.Lock() - defer mp.l.Unlock() - mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) -} - -// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided -// on the input channel. The mock producer will handle the message as if it is produced successfully, -// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting -// is set to true. -func (mp *AsyncProducer) ExpectInputAndSucceed() { - mp.ExpectInputWithCheckerFunctionAndSucceed(nil) -} - -// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided -// on the input channel. The mock producer will handle the message as if it failed to produce -// successfully. This means it will make a ProducerError available on the Errors channel. -func (mp *AsyncProducer) ExpectInputAndFail(err error) { - mp.ExpectInputWithCheckerFunctionAndFail(nil, err) -} diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go deleted file mode 100644 index 003d4d3e28..0000000000 --- a/vendor/github.com/Shopify/sarama/mocks/consumer.go +++ /dev/null @@ -1,315 +0,0 @@ -package mocks - -import ( - "sync" - "sync/atomic" - - "github.com/Shopify/sarama" -) - -// Consumer implements sarama's Consumer interface for testing purposes. -// Before you can start consuming from this consumer, you have to register -// topic/partitions using ExpectConsumePartition, and set expectations on them. -type Consumer struct { - l sync.Mutex - t ErrorReporter - config *sarama.Config - partitionConsumers map[string]map[int32]*PartitionConsumer - metadata map[string][]int32 -} - -// NewConsumer returns a new mock Consumer instance. The t argument should -// be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument can be set to nil. 
-func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
-	if config == nil {
-		config = sarama.NewConfig()
-	}
-
-	c := &Consumer{
-		t:                  t,
-		config:             config,
-		partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
-	}
-	return c
-}
-
-///////////////////////////////////////////////////
-// Consumer interface implementation
-///////////////////////////////////////////////////
-
-// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
-// Before you can start consuming a partition, you have to set expectations on it using
-// ExpectConsumePartition. You can only consume a partition once per consumer.
-func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
-		c.t.Errorf("No expectations set for %s/%d", topic, partition)
-		return nil, errOutOfExpectations
-	}
-
-	pc := c.partitionConsumers[topic][partition]
-	if pc.consumed {
-		return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
-	}
-
-	if pc.offset != AnyOffset && pc.offset != offset {
-		c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
-	}
-
-	pc.consumed = true
-	return pc, nil
-}
-
-// Topics returns a list of topics, as registered with SetTopicMetadata
-func (c *Consumer) Topics() ([]string, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.metadata == nil {
-		c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
-		return nil, sarama.ErrOutOfBrokers
-	}
-
-	var result []string
-	for topic := range c.metadata {
-		result = append(result, topic)
-	}
-	return result, nil
-}
-
-// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata
-func (c *Consumer) Partitions(topic string) ([]int32, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.metadata == nil {
-		c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
-		return nil, sarama.ErrOutOfBrokers
-	}
-	if c.metadata[topic] == nil {
-		return nil, sarama.ErrUnknownTopicOrPartition
-	}
-
-	return c.metadata[topic], nil
-}
-
-func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
-	for topic, partitionConsumers := range c.partitionConsumers {
-		hwm := make(map[int32]int64, len(partitionConsumers))
-		for partition, pc := range partitionConsumers {
-			hwm[partition] = pc.HighWaterMarkOffset()
-		}
-		hwms[topic] = hwm
-	}
-
-	return hwms
-}
-
-// Close implements the Close method from the sarama.Consumer interface. It will close
-// all registered PartitionConsumer instances.
-func (c *Consumer) Close() error {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	for _, partitions := range c.partitionConsumers {
-		for _, partitionConsumer := range partitions {
-			_ = partitionConsumer.Close()
-		}
-	}
-
-	return nil
-}
-
-///////////////////////////////////////////////////
-// Expectation API
-///////////////////////////////////////////////////
-
-// SetTopicMetadata sets the cluster's topic/partition metadata,
-// which will be returned by Topics() and Partitions().
-func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	c.metadata = metadata
-}
-
-// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
-// The registered PartitionConsumer will be returned, so you can set expectations
-// on it using method chaining. Once a topic/partition is registered, you are
-// expected to start consuming it using ConsumePartition. If that doesn't happen,
-// an error will be written to the error reporter once the mock consumer is closed.
-func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.partitionConsumers[topic] == nil {
-		c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
-	}
-
-	if c.partitionConsumers[topic][partition] == nil {
-		c.partitionConsumers[topic][partition] = &PartitionConsumer{
-			t:         c.t,
-			topic:     topic,
-			partition: partition,
-			offset:    offset,
-			messages:  make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
-			errors:    make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
-		}
-	}
-
-	return c.partitionConsumers[topic][partition]
-}
-
-///////////////////////////////////////////////////
-// PartitionConsumer mock type
-///////////////////////////////////////////////////
-
-// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
-// It is returned by the mock Consumer's ConsumePartition method, but only if it is
-// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
-// Errors and Messages channels, you should specify what values will be provided on these
-// channels using YieldMessage and YieldError.
-type PartitionConsumer struct {
-	highWaterMarkOffset     int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	l                       sync.Mutex
-	t                       ErrorReporter
-	topic                   string
-	partition               int32
-	offset                  int64
-	messages                chan *sarama.ConsumerMessage
-	errors                  chan *sarama.ConsumerError
-	singleClose             sync.Once
-	consumed                bool
-	errorsShouldBeDrained   bool
-	messagesShouldBeDrained bool
-}
-
-///////////////////////////////////////////////////
-// PartitionConsumer interface implementation
-///////////////////////////////////////////////////
-
-// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) AsyncClose() {
-	pc.singleClose.Do(func() {
-		close(pc.messages)
-		close(pc.errors)
-	})
-}
-
-// Close implements the Close method from the sarama.PartitionConsumer interface. It will
-// verify whether the partition consumer was actually started.
-func (pc *PartitionConsumer) Close() error {
-	if !pc.consumed {
-		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
-		return errPartitionConsumerNotStarted
-	}
-
-	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
-		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
-	}
-
-	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
-		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
-	}
-
-	pc.AsyncClose()
-
-	var (
-		closeErr error
-		wg       sync.WaitGroup
-	)
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-
-		var errs = make(sarama.ConsumerErrors, 0)
-		for err := range pc.errors {
-			errs = append(errs, err)
-		}
-
-		if len(errs) > 0 {
-			closeErr = errs
-		}
-	}()
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for range pc.messages {
-			// drain
-		}
-	}()
-
-	wg.Wait()
-	return closeErr
-}
-
-// Errors implements the Errors method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
-	return pc.errors
-}
-
-// Messages implements the Messages method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
-	return pc.messages
-}
-
-func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
-	return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
-}
-
-///////////////////////////////////////////////////
-// Expectation API
-///////////////////////////////////////////////////
-
-// YieldMessage will yield a message on the Messages channel of this partition consumer
-// when it is consumed. By default, the mock consumer will not verify whether this
-// message was consumed from the Messages channel, because there are legitimate
-// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
-// verify that the channel is empty on close.
-func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
-	pc.l.Lock()
-	defer pc.l.Unlock()
-
-	msg.Topic = pc.topic
-	msg.Partition = pc.partition
-	msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
-
-	pc.messages <- msg
-}
-
-// YieldError will yield an error on the Errors channel of this partition consumer
-// when it is consumed. By default, the mock consumer will not verify whether this error was
-// consumed from the Errors channel, because there are legitimate reasons for this
-// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
-// the channel is empty on close.
-func (pc *PartitionConsumer) YieldError(err error) {
-	pc.errors <- &sarama.ConsumerError{
-		Topic:     pc.topic,
-		Partition: pc.partition,
-		Err:       err,
-	}
-}
-
-// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
-// that the messages channel will be fully drained when Close is called. If this
-// expectation is not met, an error is reported to the error reporter.
-func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
-	pc.messagesShouldBeDrained = true
-}
-
-// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
-// that the errors channel will be fully drained when Close is called. If this
-// expectation is not met, an error is reported to the error reporter.
-func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
-	pc.errorsShouldBeDrained = true
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go
deleted file mode 100644
index 4adb838d99..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/mocks.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Package mocks provides mocks that can be used for testing applications
-that use Sarama. The mock types provided by this package implement the
-interfaces Sarama exports, so you can use them for dependency injection
-in your tests.
-
-All mock instances require you to set expectations on them before you
-can use them. These expectations determine how the mock will behave. If an
-expectation is not met, it will make your test fail.
-
-NOTE: this package currently does not fall under the API stability
-guarantee of Sarama as it is still considered experimental.
-*/
-package mocks
-
-import (
-	"errors"
-
-	"github.com/Shopify/sarama"
-)
-
-// ErrorReporter is a simple interface that includes the testing.T methods we use to report
-// expectation violations when using the mock objects.
-type ErrorReporter interface {
-	Errorf(string, ...interface{})
-}
-
-// ValueChecker is a function type to be set in each expectation of the producer mocks
-// to check the value passed.
-type ValueChecker func(val []byte) error
-
-var (
-	errProduceSuccess              error = nil
-	errOutOfExpectations           = errors.New("No more expectations set on mock")
-	errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
-)
-
-const AnyOffset int64 = -1000
-
-type producerExpectation struct {
-	Result        error
-	CheckFunction ValueChecker
-}
-
-type consumerExpectation struct {
-	Err error
-	Msg *sarama.ConsumerMessage
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
deleted file mode 100644
index 5de79cce84..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package mocks
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// SyncProducer implements sarama's SyncProducer interface for testing purposes.
-// Before you can use it, you have to set expectations on the mock SyncProducer
-// to tell it how to handle calls to SendMessage, so you can easily test success
-// and failure scenarios.
-type SyncProducer struct {
-	l            sync.Mutex
-	t            ErrorReporter
-	expectations []*producerExpectation
-	lastOffset   int64
-}
-
-// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument is currently unused, but is
-// maintained to be compatible with the async Producer.
-func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
-	return &SyncProducer{
-		t:            t,
-		expectations: make([]*producerExpectation, 0),
-	}
-}
-
-////////////////////////////////////////////////
-// Implement SyncProducer interface
-////////////////////////////////////////////////
-
-// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
-// You have to set expectations on the mock producer before calling SendMessage, so it knows
-// how to handle them. You can set a function in each expectation so that the message value
-// is checked by this function and an error is returned if the match fails.
-// If there are no remaining expectations when SendMessage is called,
-// the mock producer will write an error to the test state object.
-func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) > 0 {
-		expectation := sp.expectations[0]
-		sp.expectations = sp.expectations[1:]
-		if expectation.CheckFunction != nil {
-			val, err := msg.Value.Encode()
-			if err != nil {
-				sp.t.Errorf("Input message encoding failed: %s", err.Error())
-				return -1, -1, err
-			}
-
-			errCheck := expectation.CheckFunction(val)
-			if errCheck != nil {
-				sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
-				return -1, -1, errCheck
-			}
-		}
-		if expectation.Result == errProduceSuccess {
-			sp.lastOffset++
-			msg.Offset = sp.lastOffset
-			return 0, msg.Offset, nil
-		}
-		return -1, -1, expectation.Result
-	}
-	sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
-	return -1, -1, errOutOfExpectations
-}
-
-// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
-// You have to set expectations on the mock producer before calling SendMessages, so it knows
-// how to handle them. If there are no remaining expectations when SendMessages is called,
-// the mock producer will write an error to the test state object.
-func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) >= len(msgs) {
-		expectations := sp.expectations[0:len(msgs)]
-		sp.expectations = sp.expectations[len(msgs):]
-
-		for _, expectation := range expectations {
-			if expectation.Result != errProduceSuccess {
-				return expectation.Result
-			}
-		}
-		return nil
-	}
-	sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
-	return errOutOfExpectations
-}
-
-// Close corresponds with the Close method of sarama's SyncProducer implementation.
-// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,
-// so it will write an error to the test state if there are any remaining expectations.
-func (sp *SyncProducer) Close() error {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) > 0 {
-		sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
-	}
-
-	return nil
-}
-
-////////////////////////////////////////////////
-// Setting expectations
-////////////////////////////////////////////////
-
-// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
-// will be called. The mock producer will first call the given function to check the message value.
-// It will cascade the error of the function, if any, or handle the message as if it produced
-// successfully, i.e. by returning a valid partition and offset, and a nil error.
-func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-	sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
-}
-
-// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
-// called. The mock producer will first call the given function to check the message value.
-// It will cascade the error of the function, if any, or handle the message as if it failed
-// to produce successfully, i.e.
by returning the provided error. -func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) { - sp.l.Lock() - defer sp.l.Unlock() - sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) -} - -// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be -// called. The mock producer will handle the message as if it produced successfully, i.e. by -// returning a valid partition, and offset, and a nil error. -func (sp *SyncProducer) ExpectSendMessageAndSucceed() { - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil) -} - -// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be -// called. The mock producer will handle the message as if it failed to produce -// successfully, i.e. by returning the provided error. -func (sp *SyncProducer) ExpectSendMessageAndFail(err error) { - sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err) -} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go deleted file mode 100644 index b21ea634b0..0000000000 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ /dev/null @@ -1,190 +0,0 @@ -package sarama - -// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which -// tells the broker to set the timestamp to the time at which the request was received. -// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. -const ReceiveTime int64 = -1 - -// GroupGenerationUndefined is a special value for the group generation field of -// Offset Commit Requests that should be used when a consumer group does not rely -// on Kafka for partition management. 
-const GroupGenerationUndefined = -1 - -type offsetCommitRequestBlock struct { - offset int64 - timestamp int64 - metadata string -} - -func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(b.offset) - if version == 1 { - pe.putInt64(b.timestamp) - } else if b.timestamp != 0 { - Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") - } - - return pe.putString(b.metadata) -} - -func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { - if b.offset, err = pd.getInt64(); err != nil { - return err - } - if version == 1 { - if b.timestamp, err = pd.getInt64(); err != nil { - return err - } - } - b.metadata, err = pd.getString() - return err -} - -type OffsetCommitRequest struct { - ConsumerGroup string - ConsumerGroupGeneration int32 // v1 or later - ConsumerID string // v1 or later - RetentionTime int64 // v2 or later - - // Version can be: - // - 0 (kafka 0.8.1 and later) - // - 1 (kafka 0.8.2 and later) - // - 2 (kafka 0.9.0 and later) - Version int16 - blocks map[string]map[int32]*offsetCommitRequestBlock -} - -func (r *OffsetCommitRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 2 { - return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} - } - - if err := pe.putString(r.ConsumerGroup); err != nil { - return err - } - - if r.Version >= 1 { - pe.putInt32(r.ConsumerGroupGeneration) - if err := pe.putString(r.ConsumerID); err != nil { - return err - } - } else { - if r.ConsumerGroupGeneration != 0 { - Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") - } - if r.ConsumerID != "" { - Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") - } - } - - if r.Version >= 2 { - pe.putInt64(r.RetentionTime) - } else if r.RetentionTime != 0 { - Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") - } - - if err := pe.putArrayLength(len(r.blocks)); err != nil { - return err - } - for topic, partitions := range r.blocks { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err := block.encode(pe, r.Version); err != nil { - return err - } - } - } - return nil -} - -func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - - if r.ConsumerGroup, err = pd.getString(); err != nil { - return err - } - - if r.Version >= 1 { - if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { - return err - } - if r.ConsumerID, err = pd.getString(); err != nil { - return err - } - } - - if r.Version >= 2 { - if r.RetentionTime, err = pd.getInt64(); err != nil { - return err - } - } - - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - block := &offsetCommitRequestBlock{} - if err := block.decode(pd, 
r.Version); err != nil { - return err - } - r.blocks[topic][partition] = block - } - } - return nil -} - -func (r *OffsetCommitRequest) key() int16 { - return 8 -} - -func (r *OffsetCommitRequest) version() int16 { - return r.Version -} - -func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_9_0_0 - default: - return minVersion - } -} - -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { - if r.blocks == nil { - r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) - } - - if r.blocks[topic] == nil { - r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) - } - - r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} -} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go deleted file mode 100644 index 7f277e7753..0000000000 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ /dev/null @@ -1,85 +0,0 @@ -package sarama - -type OffsetCommitResponse struct { - Errors map[string]map[int32]KError -} - -func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { - if r.Errors == nil { - r.Errors = make(map[string]map[int32]KError) - } - partitions := r.Errors[topic] - if partitions == nil { - partitions = make(map[int32]KError) - r.Errors[topic] = partitions - } - partitions[partition] = kerror -} - -func (r *OffsetCommitResponse) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(r.Errors)); err != nil { - return err - } - for topic, partitions := range r.Errors { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, kerror := range partitions { - pe.putInt32(partition) - pe.putInt16(int16(kerror)) - } - } - return nil -} - -func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil || numTopics == 0 { - return err - } - - r.Errors = make(map[string]map[int32]KError, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numErrors, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Errors[name] = make(map[int32]KError, numErrors) - - for j := 0; j < numErrors; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Errors[name][id] = KError(tmp) - } - } - - return nil -} - -func (r *OffsetCommitResponse) key() int16 { - return 8 -} - -func (r *OffsetCommitResponse) version() int16 { - return 0 -} - -func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { - return minVersion -} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go deleted file mode 100644 index b19fe79ba7..0000000000 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ /dev/null @@ -1,81 +0,0 @@ -package sarama - -type OffsetFetchRequest struct { - ConsumerGroup string - Version int16 - partitions map[string][]int32 -} - -func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { - if r.Version < 0 || r.Version > 1 { - return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} - } - - if err = pe.putString(r.ConsumerGroup); err 
!= nil { - return err - } - if err = pe.putArrayLength(len(r.partitions)); err != nil { - return err - } - for topic, partitions := range r.partitions { - if err = pe.putString(topic); err != nil { - return err - } - if err = pe.putInt32Array(partitions); err != nil { - return err - } - } - return nil -} - -func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - if r.ConsumerGroup, err = pd.getString(); err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - if partitionCount == 0 { - return nil - } - r.partitions = make(map[string][]int32) - for i := 0; i < partitionCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitions, err := pd.getInt32Array() - if err != nil { - return err - } - r.partitions[topic] = partitions - } - return nil -} - -func (r *OffsetFetchRequest) key() int16 { - return 9 -} - -func (r *OffsetFetchRequest) version() int16 { - return r.Version -} - -func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_8_2_0 - default: - return minVersion - } -} - -func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { - if r.partitions == nil { - r.partitions = make(map[string][]int32) - } - - r.partitions[topic] = append(r.partitions[topic], partitionID) -} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go deleted file mode 100644 index 323220eac9..0000000000 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ /dev/null @@ -1,143 +0,0 @@ -package sarama - -type OffsetFetchResponseBlock struct { - Offset int64 - Metadata string - Err KError -} - -func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { - b.Offset, err = pd.getInt64() - if err != nil { - return err - } - - b.Metadata, err = pd.getString() - if err != nil { - return err - } - - tmp, err := pd.getInt16() - if err != nil { - return err - } - b.Err = KError(tmp) - - return nil -} - -func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt64(b.Offset) - - err = pe.putString(b.Metadata) - if err != nil { - return err - } - - pe.putInt16(int16(b.Err)) - - return nil -} - -type OffsetFetchResponse struct { - Blocks map[string]map[int32]*OffsetFetchResponseBlock -} - -func (r *OffsetFetchResponse) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(r.Blocks)); err != nil { - return err - } - for topic, partitions := range r.Blocks { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err := block.encode(pe); err != nil { - return err - } - } - } - return nil -} - -func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil || numTopics == 0 { - return err - } - - r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - if numBlocks == 0 { - r.Blocks[name] = nil - continue - } - r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { 
- return err - } - - block := new(OffsetFetchResponseBlock) - err = block.decode(pd) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - return nil -} - -func (r *OffsetFetchResponse) key() int16 { - return 9 -} - -func (r *OffsetFetchResponse) version() int16 { - return 0 -} - -func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { - return minVersion -} - -func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) - } - partitions := r.Blocks[topic] - if partitions == nil { - partitions = make(map[int32]*OffsetFetchResponseBlock) - r.Blocks[topic] = partitions - } - partitions[partition] = block -} diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go deleted file mode 100644 index 6c01f959e9..0000000000 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ /dev/null @@ -1,560 +0,0 @@ -package sarama - -import ( - "sync" - "time" -) - -// Offset Manager - -// OffsetManager uses Kafka to store and fetch consumed partition offsets. -type OffsetManager interface { - // ManagePartition creates a PartitionOffsetManager on the given topic/partition. - // It will return an error if this OffsetManager is already managing the given - // topic/partition. - ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) - - // Close stops the OffsetManager from managing offsets. It is required to call - // this function before an OffsetManager object passes out of scope, as it - // will otherwise leak memory. You must call this after all the - // PartitionOffsetManagers are closed. - Close() error -} - -type offsetManager struct { - client Client - conf *Config - group string - - lock sync.Mutex - poms map[string]map[int32]*partitionOffsetManager - boms map[*Broker]*brokerOffsetManager -} - -// NewOffsetManagerFromClient creates a new OffsetManager from the given client. -// It is still necessary to call Close() on the underlying client when finished with the partition manager. 
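As an aside, the lifecycle the OffsetManager and PartitionOffsetManager docs describe can be sketched end to end; this is a minimal sketch only, with the broker address, group, and topic as placeholders and error handling abbreviated:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Placeholder broker address; a real deployment would list its own brokers.
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
	if err != nil {
		log.Fatal(err)
	}
	pom, err := om.ManagePartition("example-topic", 0)
	if err != nil {
		log.Fatal(err)
	}

	offset, _ := pom.NextOffset() // where to resume consuming
	// ... consume from `offset`, then mark the offset of the next message to read:
	pom.MarkOffset(offset+1, "")

	// Tear down in the documented order: partition manager, manager, client.
	if err := pom.Close(); err != nil {
		log.Println(err)
	}
	if err := om.Close(); err != nil {
		log.Println(err)
	}
	if err := client.Close(); err != nil {
		log.Println(err)
	}
}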
-func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { - // Check that we are not dealing with a closed Client before processing any other arguments - if client.Closed() { - return nil, ErrClosedClient - } - - om := &offsetManager{ - client: client, - conf: client.Config(), - group: group, - poms: make(map[string]map[int32]*partitionOffsetManager), - boms: make(map[*Broker]*brokerOffsetManager), - } - - return om, nil -} - -func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { - pom, err := om.newPartitionOffsetManager(topic, partition) - if err != nil { - return nil, err - } - - om.lock.Lock() - defer om.lock.Unlock() - - topicManagers := om.poms[topic] - if topicManagers == nil { - topicManagers = make(map[int32]*partitionOffsetManager) - om.poms[topic] = topicManagers - } - - if topicManagers[partition] != nil { - return nil, ConfigurationError("That topic/partition is already being managed") - } - - topicManagers[partition] = pom - return pom, nil -} - -func (om *offsetManager) Close() error { - return nil -} - -func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - om.lock.Lock() - defer om.lock.Unlock() - - bom := om.boms[broker] - if bom == nil { - bom = om.newBrokerOffsetManager(broker) - om.boms[broker] = bom - } - - bom.refs++ - - return bom -} - -func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() - - bom.refs-- - - if bom.refs == 0 { - close(bom.updateSubscriptions) - if om.boms[bom.broker] == bom { - delete(om.boms, bom.broker) - } - } -} - -func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() - - delete(om.boms, bom.broker) -} - -func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() - - delete(om.poms[pom.topic], pom.partition) - if len(om.poms[pom.topic]) == 0 { - delete(om.poms, pom.topic) - } -} - -// Partition Offset Manager - -// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() -// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes -// out of scope. -type PartitionOffsetManager interface { - // NextOffset returns the next offset that should be consumed for the managed - // partition, accompanied by metadata which can be used to reconstruct the state - // of the partition consumer when it resumes. NextOffset() will return - // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset - // was committed for this partition yet. - NextOffset() (int64, string) - - // MarkOffset marks the provided offset, alongside a metadata string - // that represents the state of the partition consumer at that point in time. The - // metadata string can be used by another consumer to restore that state, so it - // can resume consumption. - // - // To follow upstream conventions, you are expected to mark the offset of the - // next message to read, not the last message read. Thus, when calling `MarkOffset` - // you should typically add one to the offset of the last consumed message. - // - // Note: calling MarkOffset does not necessarily commit the offset to the backend - // store immediately for efficiency reasons, and it may never be committed if - // your application crashes. 
This means that you may end up processing the same - // message twice, and your processing should ideally be idempotent. - MarkOffset(offset int64, metadata string) - - // ResetOffset resets to the provided offset, alongside a metadata string that - // represents the state of the partition consumer at that point in time. Reset - // acts as a counterpart to MarkOffset, the difference being that it allows resetting - // an offset to an earlier or smaller value, whereas MarkOffset only - // allows incrementing the offset. See MarkOffset for more details. - ResetOffset(offset int64, metadata string) - - // Errors returns a read channel of errors that occur during offset management, if - // enabled. By default, errors are logged and not returned over this channel. If - // you want to implement any custom error handling, set your config's - // Consumer.Return.Errors setting to true, and read from this channel. - Errors() <-chan *ConsumerError - - // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will - // return immediately, after which you should wait until the 'errors' channel has - // been drained and closed. It is required to call this function (or Close) before - // a consumer object passes out of scope, as it will otherwise leak memory. You - // must call this before calling Close on the underlying client. - AsyncClose() - - // Close stops the PartitionOffsetManager from managing offsets. It is required to - // call this function (or AsyncClose) before a PartitionOffsetManager object - // passes out of scope, as it will otherwise leak memory. You must call this - // before calling Close on the underlying client. - Close() error -} - -type partitionOffsetManager struct { - parent *offsetManager - topic string - partition int32 - - lock sync.Mutex - offset int64 - metadata string - dirty bool - clean sync.Cond - broker *brokerOffsetManager - - errors chan *ConsumerError - rebalance chan none - dying chan none -} - -func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { - pom := &partitionOffsetManager{ - parent: om, - topic: topic, - partition: partition, - errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), - rebalance: make(chan none, 1), - dying: make(chan none), - } - pom.clean.L = &pom.lock - - if err := pom.selectBroker(); err != nil { - return nil, err - } - - if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil { - return nil, err - } - - pom.broker.updateSubscriptions <- pom - - go withRecover(pom.mainLoop) - - return pom, nil -} - -func (pom *partitionOffsetManager) mainLoop() { - for { - select { - case <-pom.rebalance: - if err := pom.selectBroker(); err != nil { - pom.handleError(err) - pom.rebalance <- none{} - } else { - pom.broker.updateSubscriptions <- pom - } - case <-pom.dying: - if pom.broker != nil { - select { - case <-pom.rebalance: - case pom.broker.updateSubscriptions <- pom: - } - pom.parent.unrefBrokerOffsetManager(pom.broker) - } - pom.parent.abandonPartitionOffsetManager(pom) - close(pom.errors) - return - } - } -} - -func (pom *partitionOffsetManager) selectBroker() error { - if pom.broker != nil { - pom.parent.unrefBrokerOffsetManager(pom.broker) - pom.broker = nil - } - - var broker *Broker - var err error - - if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil { - return err - } - - if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil { - return err - } - - pom.broker =
pom.parent.refBrokerOffsetManager(broker) - return nil -} - -func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { - request := new(OffsetFetchRequest) - request.Version = 1 - request.ConsumerGroup = pom.parent.group - request.AddPartition(pom.topic, pom.partition) - - response, err := pom.broker.broker.FetchOffset(request) - if err != nil { - return err - } - - block := response.GetBlock(pom.topic, pom.partition) - if block == nil { - return ErrIncompleteResponse - } - - switch block.Err { - case ErrNoError: - pom.offset = block.Offset - pom.metadata = block.Metadata - return nil - case ErrNotCoordinatorForConsumer: - if retries <= 0 { - return block.Err - } - if err := pom.selectBroker(); err != nil { - return err - } - return pom.fetchInitialOffset(retries - 1) - case ErrOffsetsLoadInProgress: - if retries <= 0 { - return block.Err - } - time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) - return pom.fetchInitialOffset(retries - 1) - default: - return block.Err - } -} - -func (pom *partitionOffsetManager) handleError(err error) { - cErr := &ConsumerError{ - Topic: pom.topic, - Partition: pom.partition, - Err: err, - } - - if pom.parent.conf.Consumer.Return.Errors { - pom.errors <- cErr - } else { - Logger.Println(cErr) - } -} - -func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { - return pom.errors -} - -func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { - pom.lock.Lock() - defer pom.lock.Unlock() - - if offset > pom.offset { - pom.offset = offset - pom.metadata = metadata - pom.dirty = true - } -} - -func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { - pom.lock.Lock() - defer pom.lock.Unlock() - - if offset <= pom.offset { - pom.offset = offset - pom.metadata = metadata - pom.dirty = true - } -} - -func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { - pom.lock.Lock() - defer pom.lock.Unlock() - - if pom.offset == offset && pom.metadata == metadata { - pom.dirty = false - pom.clean.Signal() - } -} - -func (pom *partitionOffsetManager) NextOffset() (int64, string) { - pom.lock.Lock() - defer pom.lock.Unlock() - - if pom.offset >= 0 { - return pom.offset, pom.metadata - } - - return pom.parent.conf.Consumer.Offsets.Initial, "" -} - -func (pom *partitionOffsetManager) AsyncClose() { - go func() { - pom.lock.Lock() - defer pom.lock.Unlock() - - for pom.dirty { - pom.clean.Wait() - } - - close(pom.dying) - }() -} - -func (pom *partitionOffsetManager) Close() error { - pom.AsyncClose() - - var errors ConsumerErrors - for err := range pom.errors { - errors = append(errors, err) - } - - if len(errors) > 0 { - return errors - } - return nil -} - -// Broker Offset Manager - -type brokerOffsetManager struct { - parent *offsetManager - broker *Broker - timer *time.Ticker - updateSubscriptions chan *partitionOffsetManager - subscriptions map[*partitionOffsetManager]none - refs int -} - -func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - bom := &brokerOffsetManager{ - parent: om, - broker: broker, - timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), - updateSubscriptions: make(chan *partitionOffsetManager), - subscriptions: make(map[*partitionOffsetManager]none), - } - - go withRecover(bom.mainLoop) - - return bom -} - -func (bom *brokerOffsetManager) mainLoop() { - for { - select { - case <-bom.timer.C: - if len(bom.subscriptions) > 0 { - bom.flushToBroker() - } - case s, ok := <-bom.updateSubscriptions: - if !ok { - 
bom.timer.Stop() - return - } - if _, ok := bom.subscriptions[s]; ok { - delete(bom.subscriptions, s) - } else { - bom.subscriptions[s] = none{} - } - } - } -} - -func (bom *brokerOffsetManager) flushToBroker() { - request := bom.constructRequest() - if request == nil { - return - } - - response, err := bom.broker.CommitOffset(request) - - if err != nil { - bom.abort(err) - return - } - - for s := range bom.subscriptions { - if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { - continue - } - - var err KError - var ok bool - - if response.Errors[s.topic] == nil { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - continue - } - if err, ok = response.Errors[s.topic][s.partition]; !ok { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - continue - } - - switch err { - case ErrNoError: - block := request.blocks[s.topic][s.partition] - s.updateCommitted(block.offset, block.metadata) - case ErrNotLeaderForPartition, ErrLeaderNotAvailable, - ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: - // not a critical error, we just need to redispatch - delete(bom.subscriptions, s) - s.rebalance <- none{} - case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: - // nothing we can do about this, just tell the user and carry on - s.handleError(err) - case ErrOffsetsLoadInProgress: - // nothing wrong but we didn't commit, we'll get it next time round - break - case ErrUnknownTopicOrPartition: - // let the user know *and* try redispatching - if topic-auto-create is - // enabled, redispatching should trigger a metadata request and create the - // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) - fallthrough - default: - // dunno, tell the user and try redispatching - s.handleError(err) - delete(bom.subscriptions, s) - s.rebalance <- none{} - } - } -} - -func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { - var r *OffsetCommitRequest - var perPartitionTimestamp int64 - if bom.parent.conf.Consumer.Offsets.Retention == 0 { - perPartitionTimestamp = ReceiveTime - r = &OffsetCommitRequest{ - Version: 1, - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } - } else { - r = &OffsetCommitRequest{ - Version: 2, - RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } - - } - - for s := range bom.subscriptions { - s.lock.Lock() - if s.dirty { - r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) - } - s.lock.Unlock() - } - - if len(r.blocks) > 0 { - return r - } - - return nil -} - -func (bom *brokerOffsetManager) abort(err error) { - _ = bom.broker.Close() // we don't care about the error this might return, we already have one - bom.parent.abandonBroker(bom) - - for pom := range bom.subscriptions { - pom.handleError(err) - pom.rebalance <- none{} - } - - for s := range bom.updateSubscriptions { - if _, ok := bom.subscriptions[s]; !ok { - s.handleError(err) - s.rebalance <- none{} - } - } - - bom.subscriptions = make(map[*partitionOffsetManager]none) -} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go deleted file mode 100644 index 6c26960164..0000000000 --- 
a/vendor/github.com/Shopify/sarama/offset_request.go +++ /dev/null @@ -1,132 +0,0 @@ -package sarama - -type offsetRequestBlock struct { - time int64 - maxOffsets int32 // Only used in version 0 -} - -func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(int64(b.time)) - if version == 0 { - pe.putInt32(b.maxOffsets) - } - - return nil -} - -func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { - if b.time, err = pd.getInt64(); err != nil { - return err - } - if version == 0 { - if b.maxOffsets, err = pd.getInt32(); err != nil { - return err - } - } - return nil -} - -type OffsetRequest struct { - Version int16 - blocks map[string]map[int32]*offsetRequestBlock -} - -func (r *OffsetRequest) encode(pe packetEncoder) error { - pe.putInt32(-1) // replica ID is always -1 for clients - err := pe.putArrayLength(len(r.blocks)) - if err != nil { - return err - } - for topic, partitions := range r.blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err = block.encode(pe, r.Version); err != nil { - return err - } - } - } - return nil -} - -func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { - r.Version = version - - // Ignore replica ID - if _, err := pd.getInt32(); err != nil { - return err - } - blockCount, err := pd.getArrayLength() - if err != nil { - return err - } - if blockCount == 0 { - return nil - } - r.blocks = make(map[string]map[int32]*offsetRequestBlock) - for i := 0; i < blockCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.blocks[topic] = make(map[int32]*offsetRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - block := &offsetRequestBlock{} - if err := block.decode(pd, version); err != nil { - return err - } - r.blocks[topic][partition] = block - } - } - return nil -} - -func (r *OffsetRequest) key() int16 { - return 2 -} - -func (r *OffsetRequest) version() int16 { - return r.Version -} - -func (r *OffsetRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_10_1_0 - default: - return minVersion - } -} - -func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { - if r.blocks == nil { - r.blocks = make(map[string]map[int32]*offsetRequestBlock) - } - - if r.blocks[topic] == nil { - r.blocks[topic] = make(map[int32]*offsetRequestBlock) - } - - tmp := new(offsetRequestBlock) - tmp.time = time - if r.Version == 0 { - tmp.maxOffsets = maxOffsets - } - - r.blocks[topic][partitionID] = tmp -} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go deleted file mode 100644 index 9a9cfe96f3..0000000000 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ /dev/null @@ -1,174 +0,0 @@ -package sarama - -type OffsetResponseBlock struct { - Err KError - Offsets []int64 // Version 0 - Offset int64 // Version 1 - Timestamp int64 // Version 1 -} - -func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - b.Err = KError(tmp) - - if version == 0 { - b.Offsets, err = pd.getInt64Array() - - return err - } - - b.Timestamp, err = 
pd.getInt64() - if err != nil { - return err - } - - b.Offset, err = pd.getInt64() - if err != nil { - return err - } - - // For backwards compatibility put the offset in the offsets array too - b.Offsets = []int64{b.Offset} - - return nil -} - -func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { - pe.putInt16(int16(b.Err)) - - if version == 0 { - return pe.putInt64Array(b.Offsets) - } - - pe.putInt64(b.Timestamp) - pe.putInt64(b.Offset) - - return nil -} - -type OffsetResponse struct { - Version int16 - Blocks map[string]map[int32]*OffsetResponseBlock -} - -func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(OffsetResponseBlock) - err = block.decode(pd, version) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - return nil -} - -func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -/* -// [0 0 0 1 ntopics -0 8 109 121 95 116 111 112 105 99 topic -0 0 0 1 npartitions -0 0 0 0 id -0 0 - -0 0 0 1 0 0 0 0 -0 1 1 1 0 0 0 1 -0 8 109 121 95 116 111 112 -105 99 0 0 0 1 0 0 -0 0 0 0 0 0 0 1 -0 0 0 0 0 1 1 1] - -*/ -func (r *OffsetResponse) encode(pe packetEncoder) (err error) { - if err = pe.putArrayLength(len(r.Blocks)); err != nil { - return err - } - - for topic, partitions := range r.Blocks { - if err = pe.putString(topic); err != nil { - return err - } - if err = pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err = block.encode(pe, r.version()); err != nil { - return err - } - } - } - - return nil -} - -func (r *OffsetResponse) key() int16 { - return 2 -} - -func (r *OffsetResponse) version() int16 { - return r.Version -} - -func (r *OffsetResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_10_1_0 - default: - return minVersion - } -} - -// testing API - -func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) - } - byTopic, ok := r.Blocks[topic] - if !ok { - byTopic = make(map[int32]*OffsetResponseBlock) - r.Blocks[topic] = byTopic - } - byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} -} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go deleted file mode 100644 index 74805ccbf5..0000000000 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ /dev/null @@ -1,60 +0,0 @@ -package sarama - -// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. -// Types implementing Decoder only need to worry about calling methods like GetString, -// not about how a string is represented in Kafka. 
-type packetDecoder interface { - // Primitives - getInt8() (int8, error) - getInt16() (int16, error) - getInt32() (int32, error) - getInt64() (int64, error) - getVarint() (int64, error) - getArrayLength() (int, error) - getBool() (bool, error) - - // Collections - getBytes() ([]byte, error) - getVarintBytes() ([]byte, error) - getRawBytes(length int) ([]byte, error) - getString() (string, error) - getNullableString() (*string, error) - getInt32Array() ([]int32, error) - getInt64Array() ([]int64, error) - getStringArray() ([]string, error) - - // Subsets - remaining() int - getSubset(length int) (packetDecoder, error) - peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset - - // Stacks, see PushDecoder - push(in pushDecoder) error - pop() error -} - -// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity -// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where -// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they -// depend upon have been decoded. -type pushDecoder interface { - // Saves the offset into the input buffer as the location to actually read the calculated value when able. - saveOffset(in int) - - // Returns the length of data to reserve for the input of this decoder (eg 4 bytes for a CRC32). - reserveLength() int - - // Indicates that all required data is now available to calculate and check the field. - // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes - // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. - check(curOffset int, buf []byte) error -} - -// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the -// field itself is unknown until its value has been decoded (for instance varint encoded length -// fields). -// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength() -type dynamicPushDecoder interface { - pushDecoder - decoder -} diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go deleted file mode 100644 index 67b8daed82..0000000000 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ /dev/null @@ -1,65 +0,0 @@ -package sarama - -import "github.com/rcrowley/go-metrics" - -// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. -// Types implementing Encoder only need to worry about calling methods like PutString, -// not about how a string is represented in Kafka.
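For context, the representation these encoder/decoder helpers hide is plain length-prefixed data. A hand-rolled, standalone sketch of the string rule (a big-endian int16 length followed by the raw bytes, matching the 2 + len(in) accounting that putString performs) could look like:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeString writes a Kafka protocol string: a big-endian int16 length,
// then the raw bytes. This mirrors what putString/getString abstract away.
func encodeString(s string) []byte {
	buf := make([]byte, 2+len(s))
	binary.BigEndian.PutUint16(buf[:2], uint16(len(s)))
	copy(buf[2:], s)
	return buf
}

// decodeString is the inverse, returning the string and the unread remainder.
func decodeString(buf []byte) (string, []byte) {
	n := int(binary.BigEndian.Uint16(buf[:2]))
	return string(buf[2 : 2+n]), buf[2+n:]
}

func main() {
	wire := encodeString("my_topic")
	s, rest := decodeString(wire)
	fmt.Println(s, len(rest)) // my_topic 0
}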
-type packetEncoder interface { - // Primitives - putInt8(in int8) - putInt16(in int16) - putInt32(in int32) - putInt64(in int64) - putVarint(in int64) - putArrayLength(in int) error - putBool(in bool) - - // Collections - putBytes(in []byte) error - putVarintBytes(in []byte) error - putRawBytes(in []byte) error - putString(in string) error - putNullableString(in *string) error - putStringArray(in []string) error - putInt32Array(in []int32) error - putInt64Array(in []int64) error - - // Provide the current offset to record the batch size metric - offset() int - - // Stacks, see PushEncoder - push(in pushEncoder) - pop() error - - // To record metrics when provided - metricRegistry() metrics.Registry -} - -// PushEncoder is the interface for encoding fields like CRCs and lengths where the value -// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where -// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they -// depend upon have been written. -type pushEncoder interface { - // Saves the offset into the input buffer as the location to actually write the calculated value when able. - saveOffset(in int) - - // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). - reserveLength() int - - // Indicates that all required data is now available to calculate and write the field. - // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes - // of data to the saved offset, based on the data between the saved offset and curOffset. - run(curOffset int, buf []byte) error -} - -// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the -// field itself is unknown until its value has been computed (for instance varint encoded length -// fields). -type dynamicPushEncoder interface { - pushEncoder - - // Called during pop() to adjust the length of the field. - // It should return the difference in bytes between the last computed length and the current length. - adjustLength(currOffset int) int -} diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go deleted file mode 100644 index 972932728a..0000000000 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ /dev/null @@ -1,135 +0,0 @@ -package sarama - -import ( - "hash" - "hash/fnv" - "math/rand" - "time" -) - -// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], -// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided -// as simple default implementations. -type Partitioner interface { - // Partition takes a message and partition count and chooses a partition - Partition(message *ProducerMessage, numPartitions int32) (int32, error) - - // RequiresConsistency indicates to the user of the partitioner whether the - // mapping of key->partition is consistent or not. Specifically, if a - // partitioner requires consistency then it must be allowed to choose from all - // partitions (even ones known to be unavailable), and its choice must be - // respected by the caller. The obvious example is the HashPartitioner. - RequiresConsistency() bool -} - -// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
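As a usage note, a PartitionerConstructor is what gets wired into a producer configuration. A brief sketch, assuming sarama's stock Config field Producer.Partitioner (which accepts this constructor type); NewManualPartitioner is one of the stock constructors defined just below:

package main

import "github.com/Shopify/sarama"

func main() {
	// Any func(topic string) Partitioner fits the constructor type declared
	// below. The Producer.Partitioner field is assumed from sarama's Config.
	cfg := sarama.NewConfig()
	cfg.Producer.Partitioner = sarama.NewManualPartitioner
	_ = cfg
}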
-type PartitionerConstructor func(topic string) Partitioner - -type manualPartitioner struct{} - -// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided -// ProducerMessage's Partition field as the partition to produce to. -func NewManualPartitioner(topic string) Partitioner { - return new(manualPartitioner) -} - -func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - return message.Partition, nil -} - -func (p *manualPartitioner) RequiresConsistency() bool { - return true -} - -type randomPartitioner struct { - generator *rand.Rand -} - -// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. -func NewRandomPartitioner(topic string) Partitioner { - p := new(randomPartitioner) - p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) - return p -} - -func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - return int32(p.generator.Intn(int(numPartitions))), nil -} - -func (p *randomPartitioner) RequiresConsistency() bool { - return false -} - -type roundRobinPartitioner struct { - partition int32 -} - -// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. -func NewRoundRobinPartitioner(topic string) Partitioner { - return &roundRobinPartitioner{} -} - -func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - if p.partition >= numPartitions { - p.partition = 0 - } - ret := p.partition - p.partition++ - return ret, nil -} - -func (p *roundRobinPartitioner) RequiresConsistency() bool { - return false -} - -type hashPartitioner struct { - random Partitioner - hasher hash.Hash32 -} - -// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher. -// The argument is a function providing the instance, implementing the hash.Hash32 interface. This ensures that -// each partition dispatcher gets its own hasher, avoiding concurrency issues from sharing a single instance. -func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { - return func(topic string) Partitioner { - p := new(hashPartitioner) - p.random = NewRandomPartitioner(topic) - p.hasher = hasher() - return p - } -} - -// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a -// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, -// modulo the number of partitions. This ensures that messages with the same key always end up on the -// same partition.
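Before the constructor itself, the partitioning rule just described can be checked in isolation; a small standalone sketch (the key and partition count are arbitrary example values):

package main

import (
	"fmt"
	"hash/fnv"
)

// partitionForKey reproduces the hashPartitioner arithmetic described above:
// FNV-1a over the key bytes, modulo the partition count, with a negative
// result flipped positive.
func partitionForKey(key []byte, numPartitions int32) int32 {
	h := fnv.New32a()
	h.Write(key) // hash.Hash32.Write never returns an error
	p := int32(h.Sum32()) % numPartitions
	if p < 0 {
		p = -p
	}
	return p
}

func main() {
	// The same key always maps to the same partition.
	fmt.Println(partitionForKey([]byte("user-42"), 12))
	fmt.Println(partitionForKey([]byte("user-42"), 12))
}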
-func NewHashPartitioner(topic string) Partitioner { - p := new(hashPartitioner) - p.random = NewRandomPartitioner(topic) - p.hasher = fnv.New32a() - return p -} - -func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - if message.Key == nil { - return p.random.Partition(message, numPartitions) - } - bytes, err := message.Key.Encode() - if err != nil { - return -1, err - } - p.hasher.Reset() - _, err = p.hasher.Write(bytes) - if err != nil { - return -1, err - } - partition := int32(p.hasher.Sum32()) % numPartitions - if partition < 0 { - partition = -partition - } - return partition, nil -} - -func (p *hashPartitioner) RequiresConsistency() bool { - return true -} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go deleted file mode 100644 index b633cd1511..0000000000 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ /dev/null @@ -1,153 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "fmt" - "math" - - "github.com/rcrowley/go-metrics" -) - -type prepEncoder struct { - stack []pushEncoder - length int -} - -// primitives - -func (pe *prepEncoder) putInt8(in int8) { - pe.length++ -} - -func (pe *prepEncoder) putInt16(in int16) { - pe.length += 2 -} - -func (pe *prepEncoder) putInt32(in int32) { - pe.length += 4 -} - -func (pe *prepEncoder) putInt64(in int64) { - pe.length += 8 -} - -func (pe *prepEncoder) putVarint(in int64) { - var buf [binary.MaxVarintLen64]byte - pe.length += binary.PutVarint(buf[:], in) -} - -func (pe *prepEncoder) putArrayLength(in int) error { - if in > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} - } - pe.length += 4 - return nil -} - -func (pe *prepEncoder) putBool(in bool) { - pe.length++ -} - -// arrays - -func (pe *prepEncoder) putBytes(in []byte) error { - pe.length += 4 - if in == nil { - return nil - } - return pe.putRawBytes(in) -} - -func (pe *prepEncoder) putVarintBytes(in []byte) error { - if in == nil { - pe.putVarint(-1) - return nil - } - pe.putVarint(int64(len(in))) - return pe.putRawBytes(in) -} - -func (pe *prepEncoder) putRawBytes(in []byte) error { - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putNullableString(in *string) error { - if in == nil { - pe.length += 2 - return nil - } - return pe.putString(*in) -} - -func (pe *prepEncoder) putString(in string) error { - pe.length += 2 - if len(in) > math.MaxInt16 { - return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putStringArray(in []string) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - - for _, str := range in { - if err := pe.putString(str); err != nil { - return err - } - } - - return nil -} - -func (pe *prepEncoder) putInt32Array(in []int32) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - pe.length += 4 * len(in) - return nil -} - -func (pe *prepEncoder) putInt64Array(in []int64) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - pe.length += 8 * len(in) - return nil -} - -func (pe *prepEncoder) offset() int { - return pe.length -} - -// stackable - -func (pe *prepEncoder) push(in pushEncoder) { - in.saveOffset(pe.length) - pe.length += in.reserveLength() - pe.stack = append(pe.stack, in) -} - -func (pe *prepEncoder) 
pop() error { - in := pe.stack[len(pe.stack)-1] - pe.stack = pe.stack[:len(pe.stack)-1] - if dpe, ok := in.(dynamicPushEncoder); ok { - pe.length += dpe.adjustLength(pe.length) - } - - return nil -} - -// we do not record metrics during the prep encoder pass -func (pe *prepEncoder) metricRegistry() metrics.Registry { - return nil -} diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go deleted file mode 100644 index 0ec4d8d53f..0000000000 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ /dev/null @@ -1,252 +0,0 @@ -package sarama - -import "github.com/rcrowley/go-metrics" - -// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements -// it must see before responding. Any of the constants defined here are valid. On broker versions -// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many -// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced -// by setting the `min.isr` value in the brokers configuration). -type RequiredAcks int16 - -const ( - // NoResponse doesn't send any response, the TCP ACK is all you get. - NoResponse RequiredAcks = 0 - // WaitForLocal waits for only the local commit to succeed before responding. - WaitForLocal RequiredAcks = 1 - // WaitForAll waits for all in-sync replicas to commit before responding. - // The minimum number of in-sync replicas is configured on the broker via - // the `min.insync.replicas` configuration key. - WaitForAll RequiredAcks = -1 -) - -type ProduceRequest struct { - TransactionalID *string - RequiredAcks RequiredAcks - Timeout int32 - Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 - records map[string]map[int32]Records -} - -func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, - topicCompressionRatioMetric metrics.Histogram) int64 { - var topicRecordCount int64 - for _, messageBlock := range msgSet.Messages { - // Is this a fake "message" wrapping real messages? 
- if messageBlock.Msg.Set != nil { - topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) - } else { - // A single uncompressed message - topicRecordCount++ - } - // Better safe than sorry when computing the compression ratio - if messageBlock.Msg.compressedSize != 0 { - compressionRatio := float64(len(messageBlock.Msg.Value)) / - float64(messageBlock.Msg.compressedSize) - // Histograms do not support decimal values, so multiply by 100 for better precision - intCompressionRatio := int64(100 * compressionRatio) - compressionRatioMetric.Update(intCompressionRatio) - topicCompressionRatioMetric.Update(intCompressionRatio) - } - } - return topicRecordCount -} - -func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, - topicCompressionRatioMetric metrics.Histogram) int64 { - if recordBatch.compressedRecords != nil { - compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) - compressionRatioMetric.Update(compressionRatio) - topicCompressionRatioMetric.Update(compressionRatio) - } - - return int64(len(recordBatch.Records)) -} - -func (r *ProduceRequest) encode(pe packetEncoder) error { - if r.Version >= 3 { - if err := pe.putNullableString(r.TransactionalID); err != nil { - return err - } - } - pe.putInt16(int16(r.RequiredAcks)) - pe.putInt32(r.Timeout) - metricRegistry := pe.metricRegistry() - var batchSizeMetric metrics.Histogram - var compressionRatioMetric metrics.Histogram - if metricRegistry != nil { - batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) - compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) - } - totalRecordCount := int64(0) - - err := pe.putArrayLength(len(r.records)) - if err != nil { - return err - } - - for topic, partitions := range r.records { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - topicRecordCount := int64(0) - var topicCompressionRatioMetric metrics.Histogram - if metricRegistry != nil { - topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) - } - for id, records := range partitions { - startOffset := pe.offset() - pe.putInt32(id) - pe.push(&lengthField{}) - err = records.encode(pe) - if err != nil { - return err - } - err = pe.pop() - if err != nil { - return err - } - if metricRegistry != nil { - if r.Version >= 3 { - topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric) - } else { - topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric) - } - batchSize := int64(pe.offset() - startOffset) - batchSizeMetric.Update(batchSize) - getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize) - } - } - if topicRecordCount > 0 { - getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount) - getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount) - totalRecordCount += topicRecordCount - } - } - if totalRecordCount > 0 { - metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount) - getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount) - } - - return nil -} - -func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { - r.Version = version - - if version >= 3 { - id, err :=
pd.getNullableString() - if err != nil { - return err - } - r.TransactionalID = id - } - requiredAcks, err := pd.getInt16() - if err != nil { - return err - } - r.RequiredAcks = RequiredAcks(requiredAcks) - if r.Timeout, err = pd.getInt32(); err != nil { - return err - } - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - - r.records = make(map[string]map[int32]Records) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.records[topic] = make(map[int32]Records) - - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - size, err := pd.getInt32() - if err != nil { - return err - } - recordsDecoder, err := pd.getSubset(int(size)) - if err != nil { - return err - } - var records Records - if err := records.decode(recordsDecoder); err != nil { - return err - } - r.records[topic][partition] = records - } - } - - return nil -} - -func (r *ProduceRequest) key() int16 { - return 0 -} - -func (r *ProduceRequest) version() int16 { - return r.Version -} - -func (r *ProduceRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_11_0_0 - default: - return minVersion - } -} - -func (r *ProduceRequest) ensureRecords(topic string, partition int32) { - if r.records == nil { - r.records = make(map[string]map[int32]Records) - } - - if r.records[topic] == nil { - r.records[topic] = make(map[int32]Records) - } -} - -func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { - r.ensureRecords(topic, partition) - set := r.records[topic][partition].msgSet - - if set == nil { - set = new(MessageSet) - r.records[topic][partition] = newLegacyRecords(set) - } - - set.addMessage(msg) -} - -func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { - r.ensureRecords(topic, partition) - r.records[topic][partition] = newLegacyRecords(set) -} - -func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { - r.ensureRecords(topic, partition) - r.records[topic][partition] = newDefaultRecords(batch) -} diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go deleted file mode 100644 index 043c40f877..0000000000 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ /dev/null @@ -1,183 +0,0 @@ -package sarama - -import ( - "fmt" - "time" -) - -type ProduceResponseBlock struct { - Err KError - Offset int64 - // only provided if Version >= 2 and the broker is configured with `LogAppendTime` - Timestamp time.Time -} - -func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - b.Err = KError(tmp) - - b.Offset, err = pd.getInt64() - if err != nil { - return err - } - - if version >= 2 { - if millis, err := pd.getInt64(); err != nil { - return err - } else if millis != -1 { - b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) - } - } - - return nil -} - -func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { - pe.putInt16(int16(b.Err)) - pe.putInt64(b.Offset) - - if version >= 2 { - timestamp := int64(-1) - if !b.Timestamp.Before(time.Unix(0, 0)) { - timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) - 
} else if !b.Timestamp.IsZero() { - return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} - } - pe.putInt64(timestamp) - } - - return nil -} - -type ProduceResponse struct { - Blocks map[string]map[int32]*ProduceResponseBlock - Version int16 - ThrottleTime time.Duration // only provided if Version >= 1 -} - -func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(ProduceResponseBlock) - err = block.decode(pd, version) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - if r.Version >= 1 { - millis, err := pd.getInt32() - if err != nil { - return err - } - - r.ThrottleTime = time.Duration(millis) * time.Millisecond - } - - return nil -} - -func (r *ProduceResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(r.Blocks)) - if err != nil { - return err - } - for topic, partitions := range r.Blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for id, prb := range partitions { - pe.putInt32(id) - err = prb.encode(pe, r.Version) - if err != nil { - return err - } - } - } - if r.Version >= 1 { - pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) - } - return nil -} - -func (r *ProduceResponse) key() int16 { - return 0 -} - -func (r *ProduceResponse) version() int16 { - return r.Version -} - -func (r *ProduceResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_11_0_0 - default: - return minVersion - } -} - -func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -// Testing API - -func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) - } - byTopic, ok := r.Blocks[topic] - if !ok { - byTopic = make(map[int32]*ProduceResponseBlock) - r.Blocks[topic] = byTopic - } - byTopic[partition] = &ProduceResponseBlock{Err: err} -} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go deleted file mode 100644 index 627fdf0315..0000000000 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ /dev/null @@ -1,224 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "time" -) - -type partitionSet struct { - msgs []*ProducerMessage - recordsToSend Records - bufferBytes int -} - -type produceSet struct { - parent *asyncProducer - msgs map[string]map[int32]*partitionSet - - bufferBytes int - bufferCount int -} - -func newProduceSet(parent *asyncProducer) *produceSet { - return &produceSet{ - msgs: make(map[string]map[int32]*partitionSet), - parent: parent, - } -} - -func (ps *produceSet) add(msg *ProducerMessage) error { - var err error - var key, val []byte - - if 
msg.Key != nil { - if key, err = msg.Key.Encode(); err != nil { - return err - } - } - - if msg.Value != nil { - if val, err = msg.Value.Encode(); err != nil { - return err - } - } - - timestamp := msg.Timestamp - if msg.Timestamp.IsZero() { - timestamp = time.Now() - } - - partitions := ps.msgs[msg.Topic] - if partitions == nil { - partitions = make(map[int32]*partitionSet) - ps.msgs[msg.Topic] = partitions - } - - var size int - - set := partitions[msg.Partition] - if set == nil { - if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { - batch := &RecordBatch{ - FirstTimestamp: timestamp, - Version: 2, - ProducerID: -1, /* No producer id */ - Codec: ps.parent.conf.Producer.Compression, - } - set = &partitionSet{recordsToSend: newDefaultRecords(batch)} - size = recordBatchOverhead - } else { - set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} - } - partitions[msg.Partition] = set - } - - set.msgs = append(set.msgs, msg) - if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { - // We are being conservative here to avoid having to prep encode the record - size += maximumRecordOverhead - rec := &Record{ - Key: key, - Value: val, - TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp), - } - size += len(key) + len(val) - if len(msg.Headers) > 0 { - rec.Headers = make([]*RecordHeader, len(msg.Headers)) - for i := range msg.Headers { - rec.Headers[i] = &msg.Headers[i] - size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 - } - } - set.recordsToSend.recordBatch.addRecord(rec) - } else { - msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} - if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { - msgToSend.Timestamp = timestamp - msgToSend.Version = 1 - } - set.recordsToSend.msgSet.addMessage(msgToSend) - size = producerMessageOverhead + len(key) + len(val) - } - - set.bufferBytes += size - ps.bufferBytes += size - ps.bufferCount++ - - return nil -} - -func (ps *produceSet) buildRequest() *ProduceRequest { - req := &ProduceRequest{ - RequiredAcks: ps.parent.conf.Producer.RequiredAcks, - Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), - } - if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { - req.Version = 2 - } - if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { - req.Version = 3 - } - - for topic, partitionSet := range ps.msgs { - for partition, set := range partitionSet { - if req.Version >= 3 { - req.AddBatch(topic, partition, set.recordsToSend.recordBatch) - continue - } - if ps.parent.conf.Producer.Compression == CompressionNone { - req.AddSet(topic, partition, set.recordsToSend.msgSet) - } else { - // When compression is enabled, the entire set for each partition is compressed - // and sent as the payload of a single fake "message" with the appropriate codec - // set and no key. When the server sees a message with a compression codec, it - // decompresses the payload and treats the result as its message set. - payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry) - if err != nil { - Logger.Println(err) // if this happens, it's basically our fault. 
- panic(err) - } - compMsg := &Message{ - Codec: ps.parent.conf.Producer.Compression, - Key: nil, - Value: payload, - Set: set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics - } - if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { - compMsg.Version = 1 - compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp - } - req.AddMessage(topic, partition, compMsg) - } - } - } - - return req -} - -func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { - for topic, partitionSet := range ps.msgs { - for partition, set := range partitionSet { - cb(topic, partition, set.msgs) - } - } -} - -func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { - if ps.msgs[topic] == nil { - return nil - } - set := ps.msgs[topic][partition] - if set == nil { - return nil - } - ps.bufferBytes -= set.bufferBytes - ps.bufferCount -= len(set.msgs) - delete(ps.msgs[topic], partition) - return set.msgs -} - -func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { - version := 1 - if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { - version = 2 - } - - switch { - // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): - return true - // Would we overflow the size-limit of a compressed message-batch for this partition? - case ps.parent.conf.Producer.Compression != CompressionNone && - ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && - ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: - return true - // Would we overflow simply in number of messages? - case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: - return true - default: - return false - } -} - -func (ps *produceSet) readyToFlush() bool { - switch { - // If we don't have any messages, nothing else matters - case ps.empty(): - return false - // If all three config values are 0, we always flush as-fast-as-possible - case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: - return true - // If we've passed the message trigger-point - case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: - return true - // If we've passed the byte trigger-point - case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: - return true - default: - return false - } -} - -func (ps *produceSet) empty() bool { - return ps.bufferCount == 0 -} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go deleted file mode 100644 index 23045e7d33..0000000000 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ /dev/null @@ -1,324 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "math" -) - -var errInvalidArrayLength = PacketDecodingError{"invalid array length"} -var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} -var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"} -var errInvalidStringLength = PacketDecodingError{"invalid string length"} -var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} -var errVarintOverflow = PacketDecodingError{"varint overflow"} -var errInvalidBool 
= PacketDecodingError{"invalid bool"} - -type realDecoder struct { - raw []byte - off int - stack []pushDecoder -} - -// primitives - -func (rd *realDecoder) getInt8() (int8, error) { - if rd.remaining() < 1 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int8(rd.raw[rd.off]) - rd.off++ - return tmp, nil -} - -func (rd *realDecoder) getInt16() (int16, error) { - if rd.remaining() < 2 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) - rd.off += 2 - return tmp, nil -} - -func (rd *realDecoder) getInt32() (int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - return tmp, nil -} - -func (rd *realDecoder) getInt64() (int64, error) { - if rd.remaining() < 8 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off += 8 - return tmp, nil -} - -func (rd *realDecoder) getVarint() (int64, error) { - tmp, n := binary.Varint(rd.raw[rd.off:]) - if n == 0 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - if n < 0 { - rd.off -= n - return -1, errVarintOverflow - } - rd.off += n - return tmp, nil -} - -func (rd *realDecoder) getArrayLength() (int, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) - rd.off += 4 - if tmp > rd.remaining() { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } else if tmp > 2*math.MaxUint16 { - return -1, errInvalidArrayLength - } - return tmp, nil -} - -func (rd *realDecoder) getBool() (bool, error) { - b, err := rd.getInt8() - if err != nil || b == 0 { - return false, err - } - if b != 1 { - return false, errInvalidBool - } - return true, nil -} - -// collections - -func (rd *realDecoder) getBytes() ([]byte, error) { - tmp, err := rd.getInt32() - if err != nil { - return nil, err - } - if tmp == -1 { - return nil, nil - } - - return rd.getRawBytes(int(tmp)) -} - -func (rd *realDecoder) getVarintBytes() ([]byte, error) { - tmp, err := rd.getVarint() - if err != nil { - return nil, err - } - if tmp == -1 { - return nil, nil - } - - return rd.getRawBytes(int(tmp)) -} - -func (rd *realDecoder) getStringLength() (int, error) { - length, err := rd.getInt16() - if err != nil { - return 0, err - } - - n := int(length) - - switch { - case n < -1: - return 0, errInvalidStringLength - case n > rd.remaining(): - rd.off = len(rd.raw) - return 0, ErrInsufficientData - } - - return n, nil -} - -func (rd *realDecoder) getString() (string, error) { - n, err := rd.getStringLength() - if err != nil || n == -1 { - return "", err - } - - tmpStr := string(rd.raw[rd.off : rd.off+n]) - rd.off += n - return tmpStr, nil -} - -func (rd *realDecoder) getNullableString() (*string, error) { - n, err := rd.getStringLength() - if err != nil || n == -1 { - return nil, err - } - - tmpStr := string(rd.raw[rd.off : rd.off+n]) - rd.off += n - return &tmpStr, err -} - -func (rd *realDecoder) getInt32Array() ([]int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 4*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]int32, n) - for i := range ret { 
- ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - } - return ret, nil -} - -func (rd *realDecoder) getInt64Array() ([]int64, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 8*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]int64, n) - for i := range ret { - ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off += 8 - } - return ret, nil -} - -func (rd *realDecoder) getStringArray() ([]string, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]string, n) - for i := range ret { - str, err := rd.getString() - if err != nil { - return nil, err - } - - ret[i] = str - } - return ret, nil -} - -// subsets - -func (rd *realDecoder) remaining() int { - return len(rd.raw) - rd.off -} - -func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { - buf, err := rd.getRawBytes(length) - if err != nil { - return nil, err - } - return &realDecoder{raw: buf}, nil -} - -func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { - if length < 0 { - return nil, errInvalidByteSliceLength - } else if length > rd.remaining() { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - start := rd.off - rd.off += length - return rd.raw[start:rd.off], nil -} - -func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { - if rd.remaining() < offset+length { - return nil, ErrInsufficientData - } - off := rd.off + offset - return &realDecoder{raw: rd.raw[off : off+length]}, nil -} - -// stacks - -func (rd *realDecoder) push(in pushDecoder) error { - in.saveOffset(rd.off) - - var reserve int - if dpd, ok := in.(dynamicPushDecoder); ok { - if err := dpd.decode(rd); err != nil { - return err - } - } else { - reserve = in.reserveLength() - if rd.remaining() < reserve { - rd.off = len(rd.raw) - return ErrInsufficientData - } - } - - rd.stack = append(rd.stack, in) - - rd.off += reserve - - return nil -} - -func (rd *realDecoder) pop() error { - // this is go's ugly pop pattern (the inverse of append) - in := rd.stack[len(rd.stack)-1] - rd.stack = rd.stack[:len(rd.stack)-1] - - return in.check(rd.off, rd.raw) -} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go deleted file mode 100644 index 3c75387f77..0000000000 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ /dev/null @@ -1,156 +0,0 @@ -package sarama - -import ( - "encoding/binary" - - "github.com/rcrowley/go-metrics" -) - -type realEncoder struct { - raw []byte - off int - stack []pushEncoder - registry metrics.Registry -} - -// primitives - -func (re *realEncoder) putInt8(in int8) { - re.raw[re.off] = byte(in) - re.off++ -} - -func (re *realEncoder) putInt16(in int16) { - binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) - re.off += 2 -} - -func (re *realEncoder) putInt32(in int32) { - binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) - re.off += 4 -} - -func (re *realEncoder) putInt64(in int64) { - binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) - re.off += 8 -} - -func (re *realEncoder) putVarint(in int64) { - re.off += 
binary.PutVarint(re.raw[re.off:], in) -} - -func (re *realEncoder) putArrayLength(in int) error { - re.putInt32(int32(in)) - return nil -} - -func (re *realEncoder) putBool(in bool) { - if in { - re.putInt8(1) - return - } - re.putInt8(0) -} - -// collection - -func (re *realEncoder) putRawBytes(in []byte) error { - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putBytes(in []byte) error { - if in == nil { - re.putInt32(-1) - return nil - } - re.putInt32(int32(len(in))) - return re.putRawBytes(in) -} - -func (re *realEncoder) putVarintBytes(in []byte) error { - if in == nil { - re.putVarint(-1) - return nil - } - re.putVarint(int64(len(in))) - return re.putRawBytes(in) -} - -func (re *realEncoder) putString(in string) error { - re.putInt16(int16(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putNullableString(in *string) error { - if in == nil { - re.putInt16(-1) - return nil - } - return re.putString(*in) -} - -func (re *realEncoder) putStringArray(in []string) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - - for _, val := range in { - if err := re.putString(val); err != nil { - return err - } - } - - return nil -} - -func (re *realEncoder) putInt32Array(in []int32) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - for _, val := range in { - re.putInt32(val) - } - return nil -} - -func (re *realEncoder) putInt64Array(in []int64) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - for _, val := range in { - re.putInt64(val) - } - return nil -} - -func (re *realEncoder) offset() int { - return re.off -} - -// stacks - -func (re *realEncoder) push(in pushEncoder) { - in.saveOffset(re.off) - re.off += in.reserveLength() - re.stack = append(re.stack, in) -} - -func (re *realEncoder) pop() error { - // this is go's ugly pop pattern (the inverse of append) - in := re.stack[len(re.stack)-1] - re.stack = re.stack[:len(re.stack)-1] - - return in.run(re.off, re.raw) -} - -// we do record metrics during the real encoder pass -func (re *realEncoder) metricRegistry() metrics.Registry { - return re.registry -} diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go deleted file mode 100644 index cded308cf0..0000000000 --- a/vendor/github.com/Shopify/sarama/record.go +++ /dev/null @@ -1,113 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "time" -) - -const ( - controlMask = 0x20 - maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 -) - -type RecordHeader struct { - Key []byte - Value []byte -} - -func (h *RecordHeader) encode(pe packetEncoder) error { - if err := pe.putVarintBytes(h.Key); err != nil { - return err - } - return pe.putVarintBytes(h.Value) -} - -func (h *RecordHeader) decode(pd packetDecoder) (err error) { - if h.Key, err = pd.getVarintBytes(); err != nil { - return err - } - - if h.Value, err = pd.getVarintBytes(); err != nil { - return err - } - return nil -} - -type Record struct { - Attributes int8 - TimestampDelta time.Duration - OffsetDelta int64 - Key []byte - Value []byte - Headers []*RecordHeader - - length varintLengthField -} - -func (r *Record) encode(pe packetEncoder) error { - pe.push(&r.length) - pe.putInt8(r.Attributes) - pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) - pe.putVarint(r.OffsetDelta) - if err := pe.putVarintBytes(r.Key); err != nil { - return err - } - if err := 
pe.putVarintBytes(r.Value); err != nil { - return err - } - pe.putVarint(int64(len(r.Headers))) - - for _, h := range r.Headers { - if err := h.encode(pe); err != nil { - return err - } - } - - return pe.pop() -} - -func (r *Record) decode(pd packetDecoder) (err error) { - if err = pd.push(&r.length); err != nil { - return err - } - - if r.Attributes, err = pd.getInt8(); err != nil { - return err - } - - timestamp, err := pd.getVarint() - if err != nil { - return err - } - r.TimestampDelta = time.Duration(timestamp) * time.Millisecond - - if r.OffsetDelta, err = pd.getVarint(); err != nil { - return err - } - - if r.Key, err = pd.getVarintBytes(); err != nil { - return err - } - - if r.Value, err = pd.getVarintBytes(); err != nil { - return err - } - - numHeaders, err := pd.getVarint() - if err != nil { - return err - } - - if numHeaders >= 0 { - r.Headers = make([]*RecordHeader, numHeaders) - } - for i := int64(0); i < numHeaders; i++ { - hdr := new(RecordHeader) - if err := hdr.decode(pd); err != nil { - return err - } - r.Headers[i] = hdr - } - - return pd.pop() -} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go deleted file mode 100644 index a8c533b174..0000000000 --- a/vendor/github.com/Shopify/sarama/record_batch.go +++ /dev/null @@ -1,265 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "time" - - "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" -) - -const recordBatchOverhead = 49 - -type recordsArray []*Record - -func (e recordsArray) encode(pe packetEncoder) error { - for _, r := range e { - if err := r.encode(pe); err != nil { - return err - } - } - return nil -} - -func (e recordsArray) decode(pd packetDecoder) error { - for i := range e { - rec := &Record{} - if err := rec.decode(pd); err != nil { - return err - } - e[i] = rec - } - return nil -} - -type RecordBatch struct { - FirstOffset int64 - PartitionLeaderEpoch int32 - Version int8 - Codec CompressionCodec - Control bool - LastOffsetDelta int32 - FirstTimestamp time.Time - MaxTimestamp time.Time - ProducerID int64 - ProducerEpoch int16 - FirstSequence int32 - Records []*Record - PartialTrailingRecord bool - - compressedRecords []byte - recordsLen int // uncompressed records size -} - -func (b *RecordBatch) encode(pe packetEncoder) error { - if b.Version != 2 { - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} - } - pe.putInt64(b.FirstOffset) - pe.push(&lengthField{}) - pe.putInt32(b.PartitionLeaderEpoch) - pe.putInt8(b.Version) - pe.push(newCRC32Field(crcCastagnoli)) - pe.putInt16(b.computeAttributes()) - pe.putInt32(b.LastOffsetDelta) - - if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { - return err - } - - if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { - return err - } - - pe.putInt64(b.ProducerID) - pe.putInt16(b.ProducerEpoch) - pe.putInt32(b.FirstSequence) - - if err := pe.putArrayLength(len(b.Records)); err != nil { - return err - } - - if b.compressedRecords == nil { - if err := b.encodeRecords(pe); err != nil { - return err - } - } - if err := pe.putRawBytes(b.compressedRecords); err != nil { - return err - } - - if err := pe.pop(); err != nil { - return err - } - return pe.pop() -} - -func (b *RecordBatch) decode(pd packetDecoder) (err error) { - if b.FirstOffset, err = pd.getInt64(); err != nil { - return err - } - - batchLen, err := pd.getInt32() - if err != nil { - return err - } - - if b.PartitionLeaderEpoch, err = 
pd.getInt32(); err != nil { - return err - } - - if b.Version, err = pd.getInt8(); err != nil { - return err - } - - if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil { - return err - } - - attributes, err := pd.getInt16() - if err != nil { - return err - } - b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask) - b.Control = attributes&controlMask == controlMask - - if b.LastOffsetDelta, err = pd.getInt32(); err != nil { - return err - } - - if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil { - return err - } - - if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil { - return err - } - - if b.ProducerID, err = pd.getInt64(); err != nil { - return err - } - - if b.ProducerEpoch, err = pd.getInt16(); err != nil { - return err - } - - if b.FirstSequence, err = pd.getInt32(); err != nil { - return err - } - - numRecs, err := pd.getArrayLength() - if err != nil { - return err - } - if numRecs >= 0 { - b.Records = make([]*Record, numRecs) - } - - bufSize := int(batchLen) - recordBatchOverhead - recBuffer, err := pd.getRawBytes(bufSize) - if err != nil { - if err == ErrInsufficientData { - b.PartialTrailingRecord = true - b.Records = nil - return nil - } - return err - } - - if err = pd.pop(); err != nil { - return err - } - - switch b.Codec { - case CompressionNone: - case CompressionGZIP: - reader, err := gzip.NewReader(bytes.NewReader(recBuffer)) - if err != nil { - return err - } - if recBuffer, err = ioutil.ReadAll(reader); err != nil { - return err - } - case CompressionSnappy: - if recBuffer, err = snappy.Decode(recBuffer); err != nil { - return err - } - case CompressionLZ4: - reader := lz4.NewReader(bytes.NewReader(recBuffer)) - if recBuffer, err = ioutil.ReadAll(reader); err != nil { - return err - } - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)} - } - - b.recordsLen = len(recBuffer) - err = decode(recBuffer, recordsArray(b.Records)) - if err == ErrInsufficientData { - b.PartialTrailingRecord = true - b.Records = nil - return nil - } - return err -} - -func (b *RecordBatch) encodeRecords(pe packetEncoder) error { - var raw []byte - if b.Codec != CompressionNone { - var err error - if raw, err = encode(recordsArray(b.Records), nil); err != nil { - return err - } - b.recordsLen = len(raw) - } - - switch b.Codec { - case CompressionNone: - offset := pe.offset() - if err := recordsArray(b.Records).encode(pe); err != nil { - return err - } - b.recordsLen = pe.offset() - offset - case CompressionGZIP: - var buf bytes.Buffer - writer := gzip.NewWriter(&buf) - if _, err := writer.Write(raw); err != nil { - return err - } - if err := writer.Close(); err != nil { - return err - } - b.compressedRecords = buf.Bytes() - case CompressionSnappy: - b.compressedRecords = snappy.Encode(raw) - case CompressionLZ4: - var buf bytes.Buffer - writer := lz4.NewWriter(&buf) - if _, err := writer.Write(raw); err != nil { - return err - } - if err := writer.Close(); err != nil { - return err - } - b.compressedRecords = buf.Bytes() - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} - } - - return nil -} - -func (b *RecordBatch) computeAttributes() int16 { - attr := int16(b.Codec) & int16(compressionCodecMask) - if b.Control { - attr |= controlMask - } - return attr -} - -func (b *RecordBatch) addRecord(r *Record) { - b.Records = append(b.Records, r) -} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go 
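The compression handling in RecordBatch above (encodeRecords/decode) is a plain round trip through the codec named in the batch attributes. A minimal, self-contained gzip sketch of the same shape, for illustration only (standard-library code, not sarama API):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	raw := []byte("encoded records payload")

	// Compress, as encodeRecords does for CompressionGZIP.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(raw); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes the gzip trailer
		panic(err)
	}

	// Inflate, as decode does before decoding the individual records.
	r, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(raw, out)) // true
}
```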
deleted file mode 100644 index 54ee7e387d..0000000000 --- a/vendor/github.com/Shopify/sarama/records.go +++ /dev/null @@ -1,167 +0,0 @@ -package sarama - -import "fmt" - -const ( - unknownRecords = iota - legacyRecords - defaultRecords - - magicOffset = 16 - magicLength = 1 -) - -// Records implements a union type containing either a RecordBatch or a legacy MessageSet. -type Records struct { - recordsType int - msgSet *MessageSet - recordBatch *RecordBatch -} - -func newLegacyRecords(msgSet *MessageSet) Records { - return Records{recordsType: legacyRecords, msgSet: msgSet} -} - -func newDefaultRecords(batch *RecordBatch) Records { - return Records{recordsType: defaultRecords, recordBatch: batch} -} - -// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil. -// The first return value indicates whether both fields are nil (and the type is not set). -// If both fields are not nil, it returns an error. -func (r *Records) setTypeFromFields() (bool, error) { - if r.msgSet == nil && r.recordBatch == nil { - return true, nil - } - if r.msgSet != nil && r.recordBatch != nil { - return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown") - } - r.recordsType = defaultRecords - if r.msgSet != nil { - r.recordsType = legacyRecords - } - return false, nil -} - -func (r *Records) encode(pe packetEncoder) error { - if r.recordsType == unknownRecords { - if empty, err := r.setTypeFromFields(); err != nil || empty { - return err - } - } - - switch r.recordsType { - case legacyRecords: - if r.msgSet == nil { - return nil - } - return r.msgSet.encode(pe) - case defaultRecords: - if r.recordBatch == nil { - return nil - } - return r.recordBatch.encode(pe) - } - return fmt.Errorf("unknown records type: %v", r.recordsType) -} - -func (r *Records) setTypeFromMagic(pd packetDecoder) error { - dec, err := pd.peek(magicOffset, magicLength) - if err != nil { - return err - } - - magic, err := dec.getInt8() - if err != nil { - return err - } - - r.recordsType = defaultRecords - if magic < 2 { - r.recordsType = legacyRecords - } - return nil -} - -func (r *Records) decode(pd packetDecoder) error { - if r.recordsType == unknownRecords { - if err := r.setTypeFromMagic(pd); err != nil { - return nil - } - } - - switch r.recordsType { - case legacyRecords: - r.msgSet = &MessageSet{} - return r.msgSet.decode(pd) - case defaultRecords: - r.recordBatch = &RecordBatch{} - return r.recordBatch.decode(pd) - } - return fmt.Errorf("unknown records type: %v", r.recordsType) -} - -func (r *Records) numRecords() (int, error) { - if r.recordsType == unknownRecords { - if empty, err := r.setTypeFromFields(); err != nil || empty { - return 0, err - } - } - - switch r.recordsType { - case legacyRecords: - if r.msgSet == nil { - return 0, nil - } - return len(r.msgSet.Messages), nil - case defaultRecords: - if r.recordBatch == nil { - return 0, nil - } - return len(r.recordBatch.Records), nil - } - return 0, fmt.Errorf("unknown records type: %v", r.recordsType) -} - -func (r *Records) isPartial() (bool, error) { - if r.recordsType == unknownRecords { - if empty, err := r.setTypeFromFields(); err != nil || empty { - return false, err - } - } - - switch r.recordsType { - case unknownRecords: - return false, nil - case legacyRecords: - if r.msgSet == nil { - return false, nil - } - return r.msgSet.PartialTrailingMessage, nil - case defaultRecords: - if r.recordBatch == nil { - return false, nil - } - return r.recordBatch.PartialTrailingRecord, nil - } - return false, 
fmt.Errorf("unknown records type: %v", r.recordsType) -} - -func (r *Records) isControl() (bool, error) { - if r.recordsType == unknownRecords { - if empty, err := r.setTypeFromFields(); err != nil || empty { - return false, err - } - } - - switch r.recordsType { - case legacyRecords: - return false, nil - case defaultRecords: - if r.recordBatch == nil { - return false, nil - } - return r.recordBatch.Control, nil - } - return false, fmt.Errorf("unknown records type: %v", r.recordsType) -} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go deleted file mode 100644 index 9c37ca78b5..0000000000 --- a/vendor/github.com/Shopify/sarama/request.go +++ /dev/null @@ -1,121 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "fmt" - "io" -) - -type protocolBody interface { - encoder - versionedDecoder - key() int16 - version() int16 - requiredVersion() KafkaVersion -} - -type request struct { - correlationID int32 - clientID string - body protocolBody -} - -func (r *request) encode(pe packetEncoder) (err error) { - pe.push(&lengthField{}) - pe.putInt16(r.body.key()) - pe.putInt16(r.body.version()) - pe.putInt32(r.correlationID) - err = pe.putString(r.clientID) - if err != nil { - return err - } - err = r.body.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -func (r *request) decode(pd packetDecoder) (err error) { - var key int16 - if key, err = pd.getInt16(); err != nil { - return err - } - var version int16 - if version, err = pd.getInt16(); err != nil { - return err - } - if r.correlationID, err = pd.getInt32(); err != nil { - return err - } - r.clientID, err = pd.getString() - - r.body = allocateBody(key, version) - if r.body == nil { - return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} - } - return r.body.decode(pd, version) -} - -func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { - lengthBytes := make([]byte, 4) - if _, err := io.ReadFull(r, lengthBytes); err != nil { - return nil, bytesRead, err - } - bytesRead += len(lengthBytes) - - length := int32(binary.BigEndian.Uint32(lengthBytes)) - if length <= 4 || length > MaxRequestSize { - return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} - } - - encodedReq := make([]byte, length) - if _, err := io.ReadFull(r, encodedReq); err != nil { - return nil, bytesRead, err - } - bytesRead += len(encodedReq) - - req = &request{} - if err := decode(encodedReq, req); err != nil { - return nil, bytesRead, err - } - return req, bytesRead, nil -} - -func allocateBody(key, version int16) protocolBody { - switch key { - case 0: - return &ProduceRequest{} - case 1: - return &FetchRequest{} - case 2: - return &OffsetRequest{Version: version} - case 3: - return &MetadataRequest{} - case 8: - return &OffsetCommitRequest{Version: version} - case 9: - return &OffsetFetchRequest{} - case 10: - return &ConsumerMetadataRequest{} - case 11: - return &JoinGroupRequest{} - case 12: - return &HeartbeatRequest{} - case 13: - return &LeaveGroupRequest{} - case 14: - return &SyncGroupRequest{} - case 15: - return &DescribeGroupsRequest{} - case 16: - return &ListGroupsRequest{} - case 17: - return &SaslHandshakeRequest{} - case 18: - return &ApiVersionsRequest{} - case 37: - return &CreatePartitionsRequest{} - } - return nil -} diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go deleted file mode 100644 index f3f4d27d6c..0000000000 
--- a/vendor/github.com/Shopify/sarama/response_header.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "fmt" - -type responseHeader struct { - length int32 - correlationID int32 -} - -func (r *responseHeader) decode(pd packetDecoder) (err error) { - r.length, err = pd.getInt32() - if err != nil { - return err - } - if r.length <= 4 || r.length > MaxResponseSize { - return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} - } - - r.correlationID, err = pd.getInt32() - return err -} diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go deleted file mode 100644 index 7d5dc60d3e..0000000000 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level -API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level -API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. - -To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel -and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. -The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be -useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees -depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the -SyncProducer can still sometimes be lost. - -To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic -consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the -https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 -and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. - -For lower-level needs, the Broker and Request/Response objects permit precise control over each connection -and message sent on the wire; the Client provides higher-level metadata management that is shared between -the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up -exactly with the protocol fields documented by Kafka at -https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol - -Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. 
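A hedged sketch of reading that registry. It assumes sarama's Config.MetricRegistry field (the same registry the produce path passes to encode above) and a client or producer that has been running long enough for metrics to accumulate:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	config := sarama.NewConfig()
	// ... create a client/producer with this config and do some work,
	// so the registry below actually accumulates metrics ...

	// Walk every registered metric and print the common kinds.
	config.MetricRegistry.Each(func(name string, i interface{}) {
		switch m := i.(type) {
		case metrics.Meter:
			fmt.Printf("%-40s %8.2f/s\n", name, m.Rate1())
		case metrics.Histogram:
			fmt.Printf("%-40s mean=%.2f\n", name, m.Mean())
		}
	})
}
```

The metric names to expect are listed in the tables below.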
- -Broker related metrics: - - +----------------------------------------------+------------+---------------------------------------------------------------+ - | Name | Type | Description | - +----------------------------------------------+------------+---------------------------------------------------------------+ - | incoming-byte-rate | meter | Bytes/second read off all brokers | - | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | - | outgoing-byte-rate | meter | Bytes/second written off all brokers | - | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | - | request-rate | meter | Requests/second sent to all brokers | - | request-rate-for-broker- | meter | Requests/second sent to a given broker | - | request-size | histogram | Distribution of the request size in bytes for all brokers | - | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | - | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | - | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | - | response-rate | meter | Responses/second received from all brokers | - | response-rate-for-broker- | meter | Responses/second received from a given broker | - | response-size | histogram | Distribution of the response size in bytes for all brokers | - | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | - +----------------------------------------------+------------+---------------------------------------------------------------+ - -Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. - -Producer related metrics: - - +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ - | Name | Type | Description | - +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ - | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | - | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | - | record-send-rate | meter | Records/second sent to all topics | - | record-send-rate-for-topic- | meter | Records/second sent to a given topic | - | records-per-request | histogram | Distribution of the number of records sent per request for all topics | - | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | - | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | - | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | - +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ - -*/ -package sarama - -import ( - "io/ioutil" - "log" -) - -// Logger is the instance of a StdLogger interface that Sarama writes connection -// management events to. By default it is set to discard all log messages via ioutil.Discard, -// but you can set it to redirect wherever you want. 
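Redirecting that logger is a one-liner; os.Stderr as the destination is just an illustration:

```go
package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Redirect sarama's connection-management events away from ioutil.Discard.
	sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)
}
```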
-var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) - -// StdLogger is used to log error messages. -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) - Println(v ...interface{}) -} - -// PanicHandler is called for recovering from panics spawned internally to the library (and thus -// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. -var PanicHandler func(interface{}) - -// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying -// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned -// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt -// to process. -var MaxRequestSize int32 = 100 * 1024 * 1024 - -// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If -// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to -// protect the client from running out of memory. Please note that brokers do not have any natural limit on -// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers -// (see https://issues.apache.org/jira/browse/KAFKA-2063). -var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go deleted file mode 100644 index fbbc8947b2..0000000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ /dev/null @@ -1,33 +0,0 @@ -package sarama - -type SaslHandshakeRequest struct { - Mechanism string -} - -func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.Mechanism); err != nil { - return err - } - - return nil -} - -func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { - if r.Mechanism, err = pd.getString(); err != nil { - return err - } - - return nil -} - -func (r *SaslHandshakeRequest) key() int16 { - return 17 -} - -func (r *SaslHandshakeRequest) version() int16 { - return 0 -} - -func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go deleted file mode 100644 index ef290d4bc6..0000000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go +++ /dev/null @@ -1,38 +0,0 @@ -package sarama - -type SaslHandshakeResponse struct { - Err KError - EnabledMechanisms []string -} - -func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return pe.putStringArray(r.EnabledMechanisms) -} - -func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - r.Err = KError(kerr) - - if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { - return err - } - - return nil -} - -func (r *SaslHandshakeResponse) key() int16 { - return 17 -} - -func (r *SaslHandshakeResponse) version() int16 { - return 0 -} - -func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go deleted file mode 100644 index fe207080e0..0000000000 --- 
a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ /dev/null @@ -1,100 +0,0 @@ -package sarama - -type SyncGroupRequest struct { - GroupId string - GenerationId int32 - MemberId string - GroupAssignments map[string][]byte -} - -func (r *SyncGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - - pe.putInt32(r.GenerationId) - - if err := pe.putString(r.MemberId); err != nil { - return err - } - - if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { - return err - } - for memberId, memberAssignment := range r.GroupAssignments { - if err := pe.putString(memberId); err != nil { - return err - } - if err := pe.putBytes(memberAssignment); err != nil { - return err - } - } - - return nil -} - -func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.GroupAssignments = make(map[string][]byte) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - memberAssignment, err := pd.getBytes() - if err != nil { - return err - } - - r.GroupAssignments[memberId] = memberAssignment - } - - return nil -} - -func (r *SyncGroupRequest) key() int16 { - return 14 -} - -func (r *SyncGroupRequest) version() int16 { - return 0 -} - -func (r *SyncGroupRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { - if r.GroupAssignments == nil { - r.GroupAssignments = make(map[string][]byte) - } - - r.GroupAssignments[memberId] = memberAssignment -} - -func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { - bin, err := encode(memberAssignment, nil) - if err != nil { - return err - } - - r.AddGroupAssignment(memberId, bin) - return nil -} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go deleted file mode 100644 index 194b382b4a..0000000000 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ /dev/null @@ -1,41 +0,0 @@ -package sarama - -type SyncGroupResponse struct { - Err KError - MemberAssignment []byte -} - -func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { - assignment := new(ConsumerGroupMemberAssignment) - err := decode(r.MemberAssignment, assignment) - return assignment, err -} - -func (r *SyncGroupResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return pe.putBytes(r.MemberAssignment) -} - -func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - r.Err = KError(kerr) - - r.MemberAssignment, err = pd.getBytes() - return -} - -func (r *SyncGroupResponse) key() int16 { - return 14 -} - -func (r *SyncGroupResponse) version() int16 { - return 0 -} - -func (r *SyncGroupResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go deleted file mode 100644 index dd096b6db6..0000000000 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ 
/dev/null @@ -1,164 +0,0 @@ -package sarama - -import "sync" - -// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct -// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer -// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. -// -// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual -// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`. -// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. -// -// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to -// be set to true in its configuration. -type SyncProducer interface { - - // SendMessage produces a given message, and returns only when it either has - // succeeded or failed to produce. It will return the partition and the offset - // of the produced message, or an error if the message failed to produce. - SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) - - // SendMessages produces a given set of messages, and returns only when all - // messages in the set have either succeeded or failed. Note that messages - // can succeed and fail individually; if some succeed and some fail, - // SendMessages will return an error. - SendMessages(msgs []*ProducerMessage) error - - // Close shuts down the producer and waits for any buffered messages to be - // flushed. You must call this function before a producer object passes out of - // scope, as it may otherwise leak memory. You must call this before calling - // Close on the underlying client. - Close() error -} - -type syncProducer struct { - producer *asyncProducer - wg sync.WaitGroup -} - -// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. -func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { - if config == nil { - config = NewConfig() - config.Producer.Return.Successes = true - } - - if err := verifyProducerConfig(config); err != nil { - return nil, err - } - - p, err := NewAsyncProducer(addrs, config) - if err != nil { - return nil, err - } - return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil -} - -// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still -// necessary to call Close() on the underlying client when shutting down this producer. 
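A minimal usage sketch of the SyncProducer documented above; the broker address and topic are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// A nil config is allowed: NewSyncProducer then builds one with
	// Producer.Return.Successes enabled, which the SyncProducer requires.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Close must be called before the producer passes out of scope,
	// and before Close on any underlying client.
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("message stored at partition %d, offset %d", partition, offset)
}
```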
-func NewSyncProducerFromClient(client Client) (SyncProducer, error) { - if err := verifyProducerConfig(client.Config()); err != nil { - return nil, err - } - - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil -} - -func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { - sp := &syncProducer{producer: p} - - sp.wg.Add(2) - go withRecover(sp.handleSuccesses) - go withRecover(sp.handleErrors) - - return sp -} - -func verifyProducerConfig(config *Config) error { - if !config.Producer.Return.Errors { - return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") - } - if !config.Producer.Return.Successes { - return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") - } - return nil -} - -func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { - oldMetadata := msg.Metadata - defer func() { - msg.Metadata = oldMetadata - }() - - expectation := make(chan *ProducerError, 1) - msg.Metadata = expectation - sp.producer.Input() <- msg - - if err := <-expectation; err != nil { - return -1, -1, err.Err - } - - return msg.Partition, msg.Offset, nil -} - -func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { - savedMetadata := make([]interface{}, len(msgs)) - for i := range msgs { - savedMetadata[i] = msgs[i].Metadata - } - defer func() { - for i := range msgs { - msgs[i].Metadata = savedMetadata[i] - } - }() - - expectations := make(chan chan *ProducerError, len(msgs)) - go func() { - for _, msg := range msgs { - expectation := make(chan *ProducerError, 1) - msg.Metadata = expectation - sp.producer.Input() <- msg - expectations <- expectation - } - close(expectations) - }() - - var errors ProducerErrors - for expectation := range expectations { - if err := <-expectation; err != nil { - errors = append(errors, err) - } - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (sp *syncProducer) handleSuccesses() { - defer sp.wg.Done() - for msg := range sp.producer.Successes() { - expectation := msg.Metadata.(chan *ProducerError) - expectation <- nil - } -} - -func (sp *syncProducer) handleErrors() { - defer sp.wg.Done() - for err := range sp.producer.Errors() { - expectation := err.Msg.Metadata.(chan *ProducerError) - expectation <- err - } -} - -func (sp *syncProducer) Close() error { - sp.producer.AsyncClose() - sp.wg.Wait() - return nil -} diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go deleted file mode 100644 index 372278d0bf..0000000000 --- a/vendor/github.com/Shopify/sarama/timestamp.go +++ /dev/null @@ -1,40 +0,0 @@ -package sarama - -import ( - "fmt" - "time" -) - -type Timestamp struct { - *time.Time -} - -func (t Timestamp) encode(pe packetEncoder) error { - timestamp := int64(-1) - - if !t.Before(time.Unix(0, 0)) { - timestamp = t.UnixNano() / int64(time.Millisecond) - } else if !t.IsZero() { - return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)} - } - - pe.putInt64(timestamp) - return nil -} - -func (t Timestamp) decode(pd packetDecoder) error { - millis, err := pd.getInt64() - if err != nil { - return err - } - - // negative timestamps are invalid, in these cases we should return - // a zero time - timestamp := time.Time{} - if millis >= 0 { - timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) - } - - *t.Time = timestamp - return 
nil -} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go deleted file mode 100644 index 9d7b60f161..0000000000 --- a/vendor/github.com/Shopify/sarama/utils.go +++ /dev/null @@ -1,184 +0,0 @@ -package sarama - -import ( - "bufio" - "fmt" - "net" - "regexp" -) - -type none struct{} - -// make []int32 sortable so we can sort partition numbers -type int32Slice []int32 - -func (slice int32Slice) Len() int { - return len(slice) -} - -func (slice int32Slice) Less(i, j int) bool { - return slice[i] < slice[j] -} - -func (slice int32Slice) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -func dupInt32Slice(input []int32) []int32 { - ret := make([]int32, 0, len(input)) - for _, val := range input { - ret = append(ret, val) - } - return ret -} - -func withRecover(fn func()) { - defer func() { - handler := PanicHandler - if handler != nil { - if err := recover(); err != nil { - handler(err) - } - } - }() - - fn() -} - -func safeAsyncClose(b *Broker) { - tmp := b // local var prevents clobbering in goroutine - go withRecover(func() { - if connected, _ := tmp.Connected(); connected { - if err := tmp.Close(); err != nil { - Logger.Println("Error closing broker", tmp.ID(), ":", err) - } - } - }) -} - -// Encoder is a simple interface for any type that can be encoded as an array of bytes -// in order to be sent as the key or value of a Kafka message. Length() is provided as an -// optimization, and must return the same as len() on the result of Encode(). -type Encoder interface { - Encode() ([]byte, error) - Length() int -} - -// make strings and byte slices encodable for convenience so they can be used as keys -// and/or values in kafka messages - -// StringEncoder implements the Encoder interface for Go strings so that they can be used -// as the Key or Value in a ProducerMessage. -type StringEncoder string - -func (s StringEncoder) Encode() ([]byte, error) { - return []byte(s), nil -} - -func (s StringEncoder) Length() int { - return len(s) -} - -// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used -// as the Key or Value in a ProducerMessage. -type ByteEncoder []byte - -func (b ByteEncoder) Encode() ([]byte, error) { - return b, nil -} - -func (b ByteEncoder) Length() int { - return len(b) -} - -// bufConn wraps a net.Conn with a buffer for reads to reduce the number of -// reads that trigger syscalls. -type bufConn struct { - net.Conn - buf *bufio.Reader -} - -func newBufConn(conn net.Conn) *bufConn { - return &bufConn{ - Conn: conn, - buf: bufio.NewReader(conn), - } -} - -func (bc *bufConn) Read(b []byte) (n int, err error) { - return bc.buf.Read(b) -} - -// KafkaVersion instances represent versions of the upstream Kafka broker. 
-type KafkaVersion struct { - // it's a struct rather than just typing the array directly to make it opaque and stop people - // generating their own arbitrary versions - version [4]uint -} - -func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { - return KafkaVersion{ - version: [4]uint{major, minor, veryMinor, patch}, - } -} - -// IsAtLeast return true if and only if the version it is called on is -// greater than or equal to the version passed in: -// V1.IsAtLeast(V2) // false -// V2.IsAtLeast(V1) // true -func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { - for i := range v.version { - if v.version[i] > other.version[i] { - return true - } else if v.version[i] < other.version[i] { - return false - } - } - return true -} - -// Effective constants defining the supported kafka versions. -var ( - V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) - V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) - V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) - V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) - V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) - V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) - V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) - V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) - V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) - V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) - V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) - minVersion = V0_8_2_0 -) - -func ParseKafkaVersion(s string) (KafkaVersion, error) { - var major, minor, veryMinor, patch uint - var err error - if s[0] == '0' { - err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch}) - } else { - err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) - } - if err != nil { - return minVersion, err - } - return newKafkaVersion(major, minor, veryMinor, patch), nil -} - -func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { - if !regexp.MustCompile(pattern).MatchString(s) { - return fmt.Errorf("invalid version `%s`", s) - } - _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) - return err -} - -func (v KafkaVersion) String() string { - if v.version[0] == 0 { - return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3]) - } else { - return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) - } -} diff --git a/vendor/github.com/bsm/sarama-cluster/.gitignore b/vendor/github.com/bsm/sarama-cluster/.gitignore deleted file mode 100644 index 88113c5b27..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.log -*.pid -kafka*/ -vendor/ diff --git a/vendor/github.com/bsm/sarama-cluster/.travis.yml b/vendor/github.com/bsm/sarama-cluster/.travis.yml deleted file mode 100644 index d9d5efcc20..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go -go: - - 1.9.x - - 1.8.x -install: - - go get -u github.com/golang/dep/cmd/dep - - dep ensure -env: - - SCALA_VERSION=2.11 KAFKA_VERSION=0.10.1.1 - - SCALA_VERSION=2.12 KAFKA_VERSION=0.10.2.1 - - SCALA_VERSION=2.12 KAFKA_VERSION=0.11.0.1 - - SCALA_VERSION=2.12 KAFKA_VERSION=1.0.0 -script: - - make default test-race -addons: - apt: - packages: - - oracle-java8-set-default diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock deleted file mode 100644 index b7c59b1c03..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock +++ /dev/null @@ -1,99 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
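Referring back to the KafkaVersion helpers deleted above: a short sketch of parsing a version string and feeding it to a config. The version values are illustrative; Config.Version is the field the produce path consults via conf.Version:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	v, err := sarama.ParseKafkaVersion("0.11.0.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.IsAtLeast(sarama.V0_10_0_0)) // true

	// Declaring the broker version unlocks newer protocol features,
	// e.g. v2 record batches are only used at V0_11_0_0 and above.
	config := sarama.NewConfig()
	config.Version = v
	_ = config
}
```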
- - -[[projects]] - name = "github.com/Shopify/sarama" - packages = ["."] - revision = "240fd146ce68bcafb034cc5dc977229ffbafa8ea" - version = "v1.14.0" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/eapache/go-resiliency" - packages = ["breaker"] - revision = "6800482f2c813e689c88b7ed3282262385011890" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/eapache/go-xerial-snappy" - packages = ["."] - revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c" - -[[projects]] - name = "github.com/eapache/queue" - packages = ["."] - revision = "ded5959c0d4e360646dc9e9908cff48666781367" - version = "v1.0.2" - -[[projects]] - branch = "master" - name = "github.com/golang/snappy" - packages = ["."] - revision = "553a641470496b2327abcac10b36396bd98e45c9" - -[[projects]] - name = "github.com/onsi/ginkgo" - packages = [".","config","extensions/table","internal/codelocation","internal/containernode","internal/failer","internal/leafnodes","internal/remote","internal/spec","internal/spec_iterator","internal/specrunner","internal/suite","internal/testingtproxy","internal/writer","reporters","reporters/stenographer","reporters/stenographer/support/go-colorable","reporters/stenographer/support/go-isatty","types"] - revision = "9eda700730cba42af70d53180f9dcce9266bc2bc" - version = "v1.4.0" - -[[projects]] - name = "github.com/onsi/gomega" - packages = [".","format","internal/assertion","internal/asyncassertion","internal/oraclematcher","internal/testingtsupport","matchers","matchers/support/goraph/bipartitegraph","matchers/support/goraph/edge","matchers/support/goraph/node","matchers/support/goraph/util","types"] - revision = "c893efa28eb45626cdaa76c9f653b62488858837" - version = "v1.2.0" - -[[projects]] - name = "github.com/pierrec/lz4" - packages = ["."] - revision = "08c27939df1bd95e881e2c2367a749964ad1fceb" - version = "v1.0.1" - -[[projects]] - name = "github.com/pierrec/xxHash" - packages = ["xxHash32"] - revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7" - version = "v0.1.1" - -[[projects]] - branch = "master" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = ["html","html/atom","html/charset"] - revision = "a337091b0525af65de94df2eb7e98bd9962dcbe2" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = ["unix"] - revision = "665f6529cca930e27b831a0d1dafffbe1c172924" - -[[projects]] - branch = "master" - name = "golang.org/x/text" - packages = ["encoding","encoding/charmap","encoding/htmlindex","encoding/internal","encoding/internal/identifier","encoding/japanese","encoding/korean","encoding/simplifiedchinese","encoding/traditionalchinese","encoding/unicode","internal/gen","internal/tag","internal/utf8internal","language","runes","transform","unicode/cldr"] - revision = "88f656faf3f37f690df1a32515b479415e1a6769" - -[[projects]] - branch = "v2" - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "2fa33a2d1ae87e0905ef09332bb4b3fda29179f6bcd48fd3b94070774b9e458b" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml deleted file mode 100644 
index 1eecfefce5..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - name = "github.com/Shopify/sarama" - version = "^1.14.0" diff --git a/vendor/github.com/bsm/sarama-cluster/LICENSE b/vendor/github.com/bsm/sarama-cluster/LICENSE deleted file mode 100644 index 127751c47a..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -(The MIT License) - -Copyright (c) 2017 Black Square Media Ltd - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/bsm/sarama-cluster/Makefile b/vendor/github.com/bsm/sarama-cluster/Makefile deleted file mode 100644 index 706f58ec17..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -SCALA_VERSION?= 2.12 -KAFKA_VERSION?= 1.0.0 -KAFKA_DIR= kafka_$(SCALA_VERSION)-$(KAFKA_VERSION) -KAFKA_SRC= https://archive.apache.org/dist/kafka/$(KAFKA_VERSION)/$(KAFKA_DIR).tgz -KAFKA_ROOT= testdata/$(KAFKA_DIR) -PKG=$(shell go list ./... 
| grep -v vendor) - -default: vet test - -vet: - go vet $(PKG) - -test: testdeps - KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 - -test-verbose: testdeps - KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v - -test-race: testdeps - KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v -race - -testdeps: $(KAFKA_ROOT) - -doc: README.md - -.PHONY: test testdeps vet doc - -# --------------------------------------------------------------------- - -$(KAFKA_ROOT): - @mkdir -p $(dir $@) - cd $(dir $@) && curl -sSL $(KAFKA_SRC) | tar xz - -README.md: README.md.tpl $(wildcard *.go) - becca -package $(subst $(GOPATH)/src/,,$(PWD)) diff --git a/vendor/github.com/bsm/sarama-cluster/README.md b/vendor/github.com/bsm/sarama-cluster/README.md deleted file mode 100644 index ebcd755dad..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/README.md +++ /dev/null @@ -1,151 +0,0 @@ -# Sarama Cluster - -[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster) -[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster) -[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster) -[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) - -Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later). - -## Documentation - -Documentation and example are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster - -## Examples - -Consumers have two modes of operation. In the default multiplexed mode messages (and errors) of multiple -topics and partitions are all passed to the single channel: - -```go -package main - -import ( - "fmt" - "log" - "os" - "os/signal" - - cluster "github.com/bsm/sarama-cluster" -) - -func main() { - - // init (custom) config, enable errors and notifications - config := cluster.NewConfig() - config.Consumer.Return.Errors = true - config.Group.Return.Notifications = true - - // init consumer - brokers := []string{"127.0.0.1:9092"} - topics := []string{"my_topic", "other_topic"} - consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config) - if err != nil { - panic(err) - } - defer consumer.Close() - - // trap SIGINT to trigger a shutdown. 
- signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - // consume errors - go func() { - for err := range consumer.Errors() { - log.Printf("Error: %s\n", err.Error()) - } - }() - - // consume notifications - go func() { - for ntf := range consumer.Notifications() { - log.Printf("Rebalanced: %+v\n", ntf) - } - }() - - // consume messages, watch signals - for { - select { - case msg, ok := <-consumer.Messages(): - if ok { - fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value) - consumer.MarkOffset(msg, "") // mark message as processed - } - case <-signals: - return - } - } -} -``` - -Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level -consumers: - -```go -package main - -import ( - "fmt" - "log" - "os" - "os/signal" - - cluster "github.com/bsm/sarama-cluster" -) - -func main() { - - // init (custom) config, set mode to ConsumerModePartitions - config := cluster.NewConfig() - config.Group.Mode = cluster.ConsumerModePartitions - - // init consumer - brokers := []string{"127.0.0.1:9092"} - topics := []string{"my_topic", "other_topic"} - consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config) - if err != nil { - panic(err) - } - defer consumer.Close() - - // trap SIGINT to trigger a shutdown. - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - // consume partitions - for { - select { - case part, ok := <-consumer.Partitions(): - if !ok { - return - } - - // start a separate goroutine to consume messages - go func(pc cluster.PartitionConsumer) { - for msg := range pc.Messages() { - fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value) - consumer.MarkOffset(msg, "") // mark message as processed - } - }(part) - case <-signals: - return - } - } -} -``` - -## Running tests - -You need to install Ginkgo & Gomega to run tests. Please see -http://onsi.github.io/ginkgo for more details. - -To run tests, call: - - $ make test - -## Troubleshooting - -### Consumer not receiving any messages? - -By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that in the event that a brand new consumer is created, and it has never committed any offsets to kafka, it will only receive messages starting from the message after the current one that was written. - -If you wish to receive all messages (from the start of all messages in the topic) in the event that a consumer does not have any offsets committed to kafka, you need to set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`. 
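For completeness, a minimal sketch of the fix described in the troubleshooting note above (broker address, group, and topic names are placeholders, not values taken from this patch):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// Start from the oldest available offset when this group has no
	// offsets committed to Kafka yet; the default is sarama.OffsetNewest.
	config := cluster.NewConfig()
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()
}
```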
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md.tpl b/vendor/github.com/bsm/sarama-cluster/README.md.tpl deleted file mode 100644 index 5f63a690a3..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/README.md.tpl +++ /dev/null @@ -1,67 +0,0 @@ -# Sarama Cluster - -[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster) -[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster) -[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster) -[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) - -Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later). - -## Documentation - -Documentation and example are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster - -## Examples - -Consumers have two modes of operation. In the default multiplexed mode messages (and errors) of multiple -topics and partitions are all passed to the single channel: - -```go -package main - -import ( - "fmt" - "log" - "os" - "os/signal" - - cluster "github.com/bsm/sarama-cluster" -) - -func main() {{ "ExampleConsumer" | code }} -``` - -Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level -consumers: - -```go -package main - -import ( - "fmt" - "log" - "os" - "os/signal" - - cluster "github.com/bsm/sarama-cluster" -) - -func main() {{ "ExampleConsumer_Partitions" | code }} -``` - -## Running tests - -You need to install Ginkgo & Gomega to run tests. Please see -http://onsi.github.io/ginkgo for more details. - -To run tests, call: - - $ make test - -## Troubleshooting - -### Consumer not receiving any messages? - -By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that in the event that a brand new consumer is created, and it has never committed any offsets to kafka, it will only receive messages starting from the message after the current one that was written. - -If you wish to receive all messages (from the start of all messages in the topic) in the event that a consumer does not have any offsets committed to kafka, you need to set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`. 
diff --git a/vendor/github.com/bsm/sarama-cluster/balancer.go b/vendor/github.com/bsm/sarama-cluster/balancer.go deleted file mode 100644 index 0f9b445ee4..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/balancer.go +++ /dev/null @@ -1,174 +0,0 @@ -package cluster - -import ( - "math" - "sort" - - "github.com/Shopify/sarama" -) - -// NotificationType defines the type of notification -type NotificationType uint8 - -// String describes the notification type -func (t NotificationType) String() string { - switch t { - case RebalanceStart: - return "rebalance start" - case RebalanceOK: - return "rebalance OK" - case RebalanceError: - return "rebalance error" - } - return "unknown" -} - -const ( - UnknownNotification NotificationType = iota - RebalanceStart - RebalanceOK - RebalanceError -) - -// Notifications are state events emitted by the consumers on rebalance -type Notification struct { - // Type exposes the notification type - Type NotificationType - - // Claimed contains topic/partitions that were claimed by this rebalance cycle - Claimed map[string][]int32 - - // Released contains topic/partitions that were released as part of this rebalance cycle - Released map[string][]int32 - - // Current are topic/partitions that are currently claimed by the consumer - Current map[string][]int32 -} - -func newNotification(current map[string][]int32) *Notification { - return &Notification{ - Type: RebalanceStart, - Current: current, - } -} - -func (n *Notification) success(current map[string][]int32) *Notification { - o := &Notification{ - Type: RebalanceOK, - Claimed: make(map[string][]int32), - Released: make(map[string][]int32), - Current: current, - } - for topic, partitions := range current { - o.Claimed[topic] = int32Slice(partitions).Diff(int32Slice(n.Current[topic])) - } - for topic, partitions := range n.Current { - o.Released[topic] = int32Slice(partitions).Diff(int32Slice(current[topic])) - } - return o -} - -// -------------------------------------------------------------------- - -type topicInfo struct { - Partitions []int32 - MemberIDs []string -} - -func (info topicInfo) Perform(s Strategy) map[string][]int32 { - if s == StrategyRoundRobin { - return info.RoundRobin() - } - return info.Ranges() -} - -func (info topicInfo) Ranges() map[string][]int32 { - sort.Strings(info.MemberIDs) - - mlen := len(info.MemberIDs) - plen := len(info.Partitions) - res := make(map[string][]int32, mlen) - - for pos, memberID := range info.MemberIDs { - n, i := float64(plen)/float64(mlen), float64(pos) - min := int(math.Floor(i*n + 0.5)) - max := int(math.Floor((i+1)*n + 0.5)) - sub := info.Partitions[min:max] - if len(sub) > 0 { - res[memberID] = sub - } - } - return res -} - -func (info topicInfo) RoundRobin() map[string][]int32 { - sort.Strings(info.MemberIDs) - - mlen := len(info.MemberIDs) - res := make(map[string][]int32, mlen) - for i, pnum := range info.Partitions { - memberID := info.MemberIDs[i%mlen] - res[memberID] = append(res[memberID], pnum) - } - return res -} - -// -------------------------------------------------------------------- - -type balancer struct { - client sarama.Client - topics map[string]topicInfo -} - -func newBalancerFromMeta(client sarama.Client, members map[string]sarama.ConsumerGroupMemberMetadata) (*balancer, error) { - balancer := newBalancer(client) - for memberID, meta := range members { - for _, topic := range meta.Topics { - if err := balancer.Topic(topic, memberID); err != nil { - return nil, err - } - } - } - return balancer, nil -} - -func newBalancer(client
sarama.Client) *balancer { - return &balancer{ - client: client, - topics: make(map[string]topicInfo), - } -} - -func (r *balancer) Topic(name string, memberID string) error { - topic, ok := r.topics[name] - if !ok { - nums, err := r.client.Partitions(name) - if err != nil { - return err - } - topic = topicInfo{ - Partitions: nums, - MemberIDs: make([]string, 0, 1), - } - } - topic.MemberIDs = append(topic.MemberIDs, memberID) - r.topics[name] = topic - return nil -} - -func (r *balancer) Perform(s Strategy) map[string]map[string][]int32 { - if r == nil { - return nil - } - - res := make(map[string]map[string][]int32, 1) - for topic, info := range r.topics { - for memberID, partitions := range info.Perform(s) { - if _, ok := res[memberID]; !ok { - res[memberID] = make(map[string][]int32, 1) - } - res[memberID][topic] = partitions - } - } - return res -} diff --git a/vendor/github.com/bsm/sarama-cluster/client.go b/vendor/github.com/bsm/sarama-cluster/client.go deleted file mode 100644 index 42ffb30c01..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/client.go +++ /dev/null @@ -1,50 +0,0 @@ -package cluster - -import ( - "errors" - "sync/atomic" - - "github.com/Shopify/sarama" -) - -var errClientInUse = errors.New("cluster: client is already used by another consumer") - -// Client is a group client -type Client struct { - sarama.Client - config Config - - inUse uint32 -} - -// NewClient creates a new client instance -func NewClient(addrs []string, config *Config) (*Client, error) { - if config == nil { - config = NewConfig() - } - - if err := config.Validate(); err != nil { - return nil, err - } - - client, err := sarama.NewClient(addrs, &config.Config) - if err != nil { - return nil, err - } - - return &Client{Client: client, config: *config}, nil -} - -// ClusterConfig returns the cluster configuration. -func (c *Client) ClusterConfig() *Config { - cfg := c.config - return &cfg -} - -func (c *Client) claim() bool { - return atomic.CompareAndSwapUint32(&c.inUse, 0, 1) -} - -func (c *Client) release() { - atomic.CompareAndSwapUint32(&c.inUse, 1, 0) -} diff --git a/vendor/github.com/bsm/sarama-cluster/cluster.go b/vendor/github.com/bsm/sarama-cluster/cluster.go deleted file mode 100644 index adcf0e9c1c..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/cluster.go +++ /dev/null @@ -1,25 +0,0 @@ -package cluster - -// Strategy for partition to consumer assignment -type Strategy string - -const ( - // StrategyRange is the default and assigns partition ranges to consumers. - // Example with six partitions and two consumers: - // C1: [0, 1, 2] - // C2: [3, 4, 5] - StrategyRange Strategy = "range" - - // StrategyRoundRobin assigns partitions by alternating over consumers.
- // Example with six partitions and two consumers: - // C1: [0, 2, 4] - // C2: [1, 3, 5] - StrategyRoundRobin Strategy = "roundrobin" -) - -// Error instances are wrappers for internal errors with a context and -// may be returned through the consumer's Errors() channel -type Error struct { - Ctx string - error -} diff --git a/vendor/github.com/bsm/sarama-cluster/config.go b/vendor/github.com/bsm/sarama-cluster/config.go deleted file mode 100644 index 084b835f71..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/config.go +++ /dev/null @@ -1,146 +0,0 @@ -package cluster - -import ( - "regexp" - "time" - - "github.com/Shopify/sarama" -) - -var minVersion = sarama.V0_9_0_0 - -type ConsumerMode uint8 - -const ( - ConsumerModeMultiplex ConsumerMode = iota - ConsumerModePartitions -) - -// Config extends sarama.Config with Group specific namespace -type Config struct { - sarama.Config - - // Group is the namespace for group management properties - Group struct { - - // The strategy to use for the allocation of partitions to consumers (defaults to StrategyRange) - PartitionStrategy Strategy - - // By default, messages and errors from the subscribed topics and partitions are all multiplexed and - // made available through the consumer's Messages() and Errors() channels. - // - // Users who require low-level access can enable ConsumerModePartitions where individual partitions - // are exposed on the Partitions() channel. Messages and errors must then be consumed on the partitions - // themselves. - Mode ConsumerMode - - Offsets struct { - Retry struct { - // The number of retries when committing offsets (defaults to 3). - Max int - } - Synchronization struct { - // The duration allowed for other clients to commit their offsets before resumption in this client, e.g. during a rebalance - // NewConfig sets this to the Consumer.MaxProcessingTime duration of the Sarama configuration - DwellTime time.Duration - } - } - - Session struct { - // The allowed session timeout for registered consumers (defaults to 30s). - // Must be within the allowed server range. - Timeout time.Duration - } - - Heartbeat struct { - // Interval between each heartbeat (defaults to 3s). It should be no more - // than 1/3rd of the Group.Session.Timeout setting - Interval time.Duration - } - - // Return specifies which group channels will be populated. If they are set to true, - // you must read from the respective channels to prevent deadlock. - Return struct { - // If enabled, rebalance notification will be returned on the - // Notifications channel (default disabled). - Notifications bool - } - - Topics struct { - // An additional whitelist of topics to subscribe to. - Whitelist *regexp.Regexp - // An additional blacklist of topics to avoid. If set, this will take precedence over - // the Whitelist setting. - Blacklist *regexp.Regexp - } - - Member struct { - // Custom metadata to include when joining the group. The user data for all joined members - // can be retrieved by sending a DescribeGroupRequest to the broker that is the - // coordinator for the group. - UserData []byte - } - } -} - -// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config { - c := &Config{ - Config: *sarama.NewConfig(), - } - c.Group.PartitionStrategy = StrategyRange - c.Group.Offsets.Retry.Max = 3 - c.Group.Offsets.Synchronization.DwellTime = c.Consumer.MaxProcessingTime - c.Group.Session.Timeout = 30 * time.Second - c.Group.Heartbeat.Interval = 3 * time.Second - c.Config.Version = minVersion - return c -} - -// Validate checks a Config instance. It will return a -// sarama.ConfigurationError if the specified values don't make sense. -func (c *Config) Validate() error { - if c.Group.Heartbeat.Interval%time.Millisecond != 0 { - sarama.Logger.Println("Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.") - } - if c.Group.Session.Timeout%time.Millisecond != 0 { - sarama.Logger.Println("Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.") - } - if c.Group.PartitionStrategy != StrategyRange && c.Group.PartitionStrategy != StrategyRoundRobin { - sarama.Logger.Println("Group.PartitionStrategy is not supported; range will be assumed.") - } - if !c.Version.IsAtLeast(minVersion) { - sarama.Logger.Println("Version is not supported; 0.9.0.0 will be assumed.") - c.Version = minVersion - } - if err := c.Config.Validate(); err != nil { - return err - } - - // validate the Group values - switch { - case c.Group.Offsets.Retry.Max < 0: - return sarama.ConfigurationError("Group.Offsets.Retry.Max must be >= 0") - case c.Group.Offsets.Synchronization.DwellTime <= 0: - return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be > 0") - case c.Group.Offsets.Synchronization.DwellTime > 10*time.Minute: - return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be <= 10m") - case c.Group.Heartbeat.Interval <= 0: - return sarama.ConfigurationError("Group.Heartbeat.Interval must be > 0") - case c.Group.Session.Timeout <= 0: - return sarama.ConfigurationError("Group.Session.Timeout must be > 0") - case !c.Metadata.Full && c.Group.Topics.Whitelist != nil: - return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Whitelist is used") - case !c.Metadata.Full && c.Group.Topics.Blacklist != nil: - return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Blacklist is used") - } - - // ensure offset is correct - switch c.Consumer.Offsets.Initial { - case sarama.OffsetOldest, sarama.OffsetNewest: - default: - return sarama.ConfigurationError("Consumer.Offsets.Initial must be either OffsetOldest or OffsetNewest") - } - - return nil -} diff --git a/vendor/github.com/bsm/sarama-cluster/consumer.go b/vendor/github.com/bsm/sarama-cluster/consumer.go deleted file mode 100644 index 761596d1a5..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/consumer.go +++ /dev/null @@ -1,875 +0,0 @@ -package cluster - -import ( - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/Shopify/sarama" -) - -// Consumer is a cluster group consumer -type Consumer struct { - client *Client - ownClient bool - - consumer sarama.Consumer - subs *partitionMap - - consumerID string - groupID string - - memberID string - generationID int32 - membershipMu sync.RWMutex - - coreTopics []string - extraTopics []string - - dying, dead chan none - closeOnce sync.Once - - consuming int32 - messages chan *sarama.ConsumerMessage - errors chan error - partitions chan PartitionConsumer - notifications chan *Notification - - commitMu sync.Mutex -} - -// NewConsumer initializes a new consumer -func NewConsumer(addrs []string, groupID
string, topics []string, config *Config) (*Consumer, error) { - client, err := NewClient(addrs, config) - if err != nil { - return nil, err - } - - consumer, err := NewConsumerFromClient(client, groupID, topics) - if err != nil { - return nil, err - } - consumer.ownClient = true - return consumer, nil -} - -// NewConsumerFromClient initializes a new consumer from an existing client. -// -// Please note that clients cannot be shared between consumers (due to Kafka internals), -// they can only be re-used which requires the user to call Close() on the first consumer -// before using this method again to initialize another one. Attempts to use a client with -// more than one consumer at a time will return errors. -func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) { - if !client.claim() { - return nil, errClientInUse - } - - consumer, err := sarama.NewConsumerFromClient(client.Client) - if err != nil { - client.release() - return nil, err - } - - sort.Strings(topics) - c := &Consumer{ - client: client, - consumer: consumer, - subs: newPartitionMap(), - groupID: groupID, - - coreTopics: topics, - - dying: make(chan none), - dead: make(chan none), - - messages: make(chan *sarama.ConsumerMessage), - errors: make(chan error, client.config.ChannelBufferSize), - partitions: make(chan PartitionConsumer, 1), - notifications: make(chan *Notification), - } - if err := c.client.RefreshCoordinator(groupID); err != nil { - client.release() - return nil, err - } - - go c.mainLoop() - return c, nil -} - -// Messages returns the read channel for the messages that are returned by -// the broker. -// -// This channel will only return if Config.Group.Mode option is set to -// ConsumerModeMultiplex (default). -func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages } - -// Partitions returns the read channels for individual partitions of this broker. -// -// This channel will only return if Config.Group.Mode option is set to -// ConsumerModePartitions. -// -// The Partitions() channel must be listened to for the life of this consumer; -// when a rebalance happens old partitions will be closed (naturally come to -// completion) and new ones will be emitted. The returned channel will only close -// when the consumer is completely shut down. -func (c *Consumer) Partitions() <-chan PartitionConsumer { return c.partitions } - -// Errors returns a read channel of errors that occur during offset management, if -// enabled. By default, errors are logged and not returned over this channel. If -// you want to implement any custom error handling, set your config's -// Consumer.Return.Errors setting to true, and read from this channel. -func (c *Consumer) Errors() <-chan error { return c.errors } - -// Notifications returns a channel of Notifications that occur during consumer -// rebalancing. Notifications will only be emitted over this channel if your config's -// Group.Return.Notifications setting is set to true. -func (c *Consumer) Notifications() <-chan *Notification { return c.notifications } - -// HighWaterMarks returns the current high water marks for each topic and partition -// Consistency between partitions is not guaranteed since high water marks are updated separately. -func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { return c.consumer.HighWaterMarks() } - -// MarkOffset marks the provided message as processed, alongside a metadata string -// that represents the state of the partition consumer at that point in time.
The -// metadata string can be used by another consumer to restore that state, so it -// can resume consumption. -// -// Note: calling MarkOffset does not necessarily commit the offset to the backend -// store immediately for efficiency reasons, and it may never be committed if -// your application crashes. This means that you may end up processing the same -// message twice, and your processing should ideally be idempotent. -func (c *Consumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) { - c.subs.Fetch(msg.Topic, msg.Partition).MarkOffset(msg.Offset+1, metadata) -} - -// MarkPartitionOffset marks an offset of the provided topic/partition as processed. -// See MarkOffset for additional explanation. -func (c *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) { - c.subs.Fetch(topic, partition).MarkOffset(offset+1, metadata) -} - -// MarkOffsets marks stashed offsets as processed. -// See MarkOffset for additional explanation. -func (c *Consumer) MarkOffsets(s *OffsetStash) { - s.mu.Lock() - defer s.mu.Unlock() - - for tp, info := range s.offsets { - c.subs.Fetch(tp.Topic, tp.Partition).MarkOffset(info.Offset+1, info.Metadata) - delete(s.offsets, tp) - } -} - -// Subscriptions returns the consumed topics and partitions -func (c *Consumer) Subscriptions() map[string][]int32 { - return c.subs.Info() -} - -// CommitOffsets allows you to manually commit previously marked offsets. By default there is no -// need to call this function as the consumer will commit offsets automatically -// using the Config.Consumer.Offsets.CommitInterval setting. -// -// Please be aware that calling this function during an internal rebalance cycle may return -// broker errors (e.g. sarama.ErrUnknownMemberId or sarama.ErrIllegalGeneration).
-func (c *Consumer) CommitOffsets() error { - c.commitMu.Lock() - defer c.commitMu.Unlock() - - memberID, generationID := c.membership() - req := &sarama.OffsetCommitRequest{ - Version: 2, - ConsumerGroup: c.groupID, - ConsumerGroupGeneration: generationID, - ConsumerID: memberID, - RetentionTime: -1, - } - - if ns := c.client.config.Consumer.Offsets.Retention; ns != 0 { - req.RetentionTime = int64(ns / time.Millisecond) - } - - snap := c.subs.Snapshot() - dirty := false - for tp, state := range snap { - if state.Dirty { - dirty = true - req.AddBlock(tp.Topic, tp.Partition, state.Info.Offset, 0, state.Info.Metadata) - } - } - if !dirty { - return nil - } - - broker, err := c.client.Coordinator(c.groupID) - if err != nil { - c.closeCoordinator(broker, err) - return err - } - - resp, err := broker.CommitOffset(req) - if err != nil { - c.closeCoordinator(broker, err) - return err - } - - for topic, errs := range resp.Errors { - for partition, kerr := range errs { - if kerr != sarama.ErrNoError { - err = kerr - } else if state, ok := snap[topicPartition{topic, partition}]; ok { - c.subs.Fetch(topic, partition).MarkCommitted(state.Info.Offset) - } - } - } - return err -} - -// Close safely closes the consumer and releases all resources -func (c *Consumer) Close() (err error) { - c.closeOnce.Do(func() { - close(c.dying) - <-c.dead - - if e := c.release(); e != nil { - err = e - } - if e := c.consumer.Close(); e != nil { - err = e - } - close(c.messages) - close(c.errors) - - if e := c.leaveGroup(); e != nil { - err = e - } - close(c.partitions) - close(c.notifications) - - // drain - for range c.messages { - } - for range c.errors { - } - for p := range c.partitions { - _ = p.Close() - } - for range c.notifications { - } - - c.client.release() - if c.ownClient { - if e := c.client.Close(); e != nil { - err = e - } - } - }) - return -} - -func (c *Consumer) mainLoop() { - defer close(c.dead) - defer atomic.StoreInt32(&c.consuming, 0) - - for { - atomic.StoreInt32(&c.consuming, 0) - - // Check if close was requested - select { - case <-c.dying: - return - default: - } - - // Start next consume cycle - c.nextTick() - } -} - -func (c *Consumer) nextTick() { - // Remember previous subscriptions - var notification *Notification - if c.client.config.Group.Return.Notifications { - notification = newNotification(c.subs.Info()) - } - - // Refresh coordinator - if err := c.refreshCoordinator(); err != nil { - c.rebalanceError(err, nil) - return - } - - // Release subscriptions - if err := c.release(); err != nil { - c.rebalanceError(err, nil) - return - } - - // Issue rebalance start notification - if c.client.config.Group.Return.Notifications { - c.handleNotification(notification) - } - - // Rebalance, fetch new subscriptions - subs, err := c.rebalance() - if err != nil { - c.rebalanceError(err, notification) - return - } - - // Coordinate loops, make sure everything is - // stopped on exit - tomb := newLoopTomb() - defer tomb.Close() - - // Start the heartbeat - tomb.Go(c.hbLoop) - - // Subscribe to topic/partitions - if err := c.subscribe(tomb, subs); err != nil { - c.rebalanceError(err, notification) - return - } - - // Update/issue notification with new claims - if c.client.config.Group.Return.Notifications { - notification = notification.success(subs) - c.handleNotification(notification) - } - - // Start topic watcher loop - tomb.Go(c.twLoop) - - // Start consuming and committing offsets - tomb.Go(c.cmLoop) - atomic.StoreInt32(&c.consuming, 1) - - // Wait for signals - select { - case <-tomb.Dying(): 
- case <-c.dying: - } -} - -// heartbeat loop, triggered by the mainLoop -func (c *Consumer) hbLoop(stopped <-chan none) { - ticker := time.NewTicker(c.client.config.Group.Heartbeat.Interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - switch err := c.heartbeat(); err { - case nil, sarama.ErrNoError: - case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress: - return - default: - c.handleError(&Error{Ctx: "heartbeat", error: err}) - return - } - case <-stopped: - return - case <-c.dying: - return - } - } -} - -// topic watcher loop, triggered by the mainLoop -func (c *Consumer) twLoop(stopped <-chan none) { - ticker := time.NewTicker(c.client.config.Metadata.RefreshFrequency / 2) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - topics, err := c.client.Topics() - if err != nil { - c.handleError(&Error{Ctx: "topics", error: err}) - return - } - - for _, topic := range topics { - if !c.isKnownCoreTopic(topic) && - !c.isKnownExtraTopic(topic) && - c.isPotentialExtraTopic(topic) { - return - } - } - case <-stopped: - return - case <-c.dying: - return - } - } -} - -// commit loop, triggered by the mainLoop -func (c *Consumer) cmLoop(stopped <-chan none) { - ticker := time.NewTicker(c.client.config.Consumer.Offsets.CommitInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); err != nil { - c.handleError(&Error{Ctx: "commit", error: err}) - return - } - case <-stopped: - return - case <-c.dying: - return - } - } -} - -func (c *Consumer) rebalanceError(err error, n *Notification) { - if n != nil { - n.Type = RebalanceError - c.handleNotification(n) - } - - switch err { - case sarama.ErrRebalanceInProgress: - default: - c.handleError(&Error{Ctx: "rebalance", error: err}) - } - - select { - case <-c.dying: - case <-time.After(c.client.config.Metadata.Retry.Backoff): - } -} - -func (c *Consumer) handleNotification(n *Notification) { - if c.client.config.Group.Return.Notifications { - select { - case c.notifications <- n: - case <-c.dying: - return - } - } -} - -func (c *Consumer) handleError(e *Error) { - if c.client.config.Consumer.Return.Errors { - select { - case c.errors <- e: - case <-c.dying: - return - } - } else { - sarama.Logger.Printf("%s error: %s\n", e.Ctx, e.Error()) - } -} - -// Releases the consumer and commits offsets, called from rebalance() and Close() -func (c *Consumer) release() (err error) { - // Stop all consumers - c.subs.Stop() - - // Clear subscriptions on exit - defer c.subs.Clear() - - // Wait for messages to be processed - timeout := time.NewTimer(c.client.config.Group.Offsets.Synchronization.DwellTime) - defer timeout.Stop() - - select { - case <-c.dying: - case <-timeout.C: - } - - // Commit offsets, continue on errors - if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil { - err = e - } - - return -} - -// -------------------------------------------------------------------- - -// Performs a heartbeat, part of the mainLoop() -func (c *Consumer) heartbeat() error { - broker, err := c.client.Coordinator(c.groupID) - if err != nil { - c.closeCoordinator(broker, err) - return err - } - - memberID, generationID := c.membership() - resp, err := broker.Heartbeat(&sarama.HeartbeatRequest{ - GroupId: c.groupID, - MemberId: memberID, - GenerationId: generationID, - }) - if err != nil { - c.closeCoordinator(broker, err) - return err - } - return resp.Err -} - -// Performs a rebalance, part of the 
mainLoop() -func (c *Consumer) rebalance() (map[string][]int32, error) { - memberID, _ := c.membership() - sarama.Logger.Printf("cluster/consumer %s rebalance\n", memberID) - - allTopics, err := c.client.Topics() - if err != nil { - return nil, err - } - c.extraTopics = c.selectExtraTopics(allTopics) - sort.Strings(c.extraTopics) - - // Re-join consumer group - strategy, err := c.joinGroup() - switch { - case err == sarama.ErrUnknownMemberId: - c.membershipMu.Lock() - c.memberID = "" - c.membershipMu.Unlock() - return nil, err - case err != nil: - return nil, err - } - - // Sync consumer group state, fetch subscriptions - subs, err := c.syncGroup(strategy) - switch { - case err == sarama.ErrRebalanceInProgress: - return nil, err - case err != nil: - _ = c.leaveGroup() - return nil, err - } - return subs, nil -} - -// Performs the subscription, part of the mainLoop() -func (c *Consumer) subscribe(tomb *loopTomb, subs map[string][]int32) error { - // fetch offsets - offsets, err := c.fetchOffsets(subs) - if err != nil { - _ = c.leaveGroup() - return err - } - - // create consumers in parallel - var mu sync.Mutex - var wg sync.WaitGroup - - for topic, partitions := range subs { - for _, partition := range partitions { - wg.Add(1) - - info := offsets[topic][partition] - go func(topic string, partition int32) { - if e := c.createConsumer(tomb, topic, partition, info); e != nil { - mu.Lock() - err = e - mu.Unlock() - } - wg.Done() - }(topic, partition) - } - } - wg.Wait() - - if err != nil { - _ = c.release() - _ = c.leaveGroup() - } - return err -} - -// -------------------------------------------------------------------- - -// Send a request to the broker to join group on rebalance() -func (c *Consumer) joinGroup() (*balancer, error) { - memberID, _ := c.membership() - req := &sarama.JoinGroupRequest{ - GroupId: c.groupID, - MemberId: memberID, - SessionTimeout: int32(c.client.config.Group.Session.Timeout / time.Millisecond), - ProtocolType: "consumer", - } - - meta := &sarama.ConsumerGroupMemberMetadata{ - Version: 1, - Topics: append(c.coreTopics, c.extraTopics...), - UserData: c.client.config.Group.Member.UserData, - } - err := req.AddGroupProtocolMetadata(string(StrategyRange), meta) - if err != nil { - return nil, err - } - err = req.AddGroupProtocolMetadata(string(StrategyRoundRobin), meta) - if err != nil { - return nil, err - } - - broker, err := c.client.Coordinator(c.groupID) - if err != nil { - c.closeCoordinator(broker, err) - return nil, err - } - - resp, err := broker.JoinGroup(req) - if err != nil { - c.closeCoordinator(broker, err) - return nil, err - } else if resp.Err != sarama.ErrNoError { - c.closeCoordinator(broker, resp.Err) - return nil, resp.Err - } - - var strategy *balancer - if resp.LeaderId == resp.MemberId { - members, err := resp.GetMembers() - if err != nil { - return nil, err - } - - strategy, err = newBalancerFromMeta(c.client, members) - if err != nil { - return nil, err - } - } - - c.membershipMu.Lock() - c.memberID = resp.MemberId - c.generationID = resp.GenerationId - c.membershipMu.Unlock() - - return strategy, nil -} - -// Send a request to the broker to sync the group on rebalance(). -// Returns a list of topics and partitions to consume. 
-func (c *Consumer) syncGroup(strategy *balancer) (map[string][]int32, error) { - memberID, generationID := c.membership() - req := &sarama.SyncGroupRequest{ - GroupId: c.groupID, - MemberId: memberID, - GenerationId: generationID, - } - - for memberID, topics := range strategy.Perform(c.client.config.Group.PartitionStrategy) { - if err := req.AddGroupAssignmentMember(memberID, &sarama.ConsumerGroupMemberAssignment{ - Version: 1, - Topics: topics, - }); err != nil { - return nil, err - } - } - - broker, err := c.client.Coordinator(c.groupID) - if err != nil { - c.closeCoordinator(broker, err) - return nil, err - } - - resp, err := broker.SyncGroup(req) - if err != nil { - c.closeCoordinator(broker, err) - return nil, err - } else if resp.Err != sarama.ErrNoError { - c.closeCoordinator(broker, resp.Err) - return nil, resp.Err - } - - // Return if there is nothing to subscribe to - if len(resp.MemberAssignment) == 0 { - return nil, nil - } - - // Get assigned subscriptions - members, err := resp.GetMemberAssignment() - if err != nil { - return nil, err - } - - // Sort partitions, for each topic - for topic := range members.Topics { - sort.Sort(int32Slice(members.Topics[topic])) - } - return members.Topics, nil -} - -// Fetches latest committed offsets for all subscriptions -func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) { - offsets := make(map[string]map[int32]offsetInfo, len(subs)) - req := &sarama.OffsetFetchRequest{ - Version: 1, - ConsumerGroup: c.groupID, - } - - for topic, partitions := range subs { - offsets[topic] = make(map[int32]offsetInfo, len(partitions)) - for _, partition := range partitions { - offsets[topic][partition] = offsetInfo{Offset: -1} - req.AddPartition(topic, partition) - } - } - - broker, err := c.client.Coordinator(c.groupID) - if err != nil { - c.closeCoordinator(broker, err) - return nil, err - } - - resp, err := broker.FetchOffset(req) - if err != nil { - c.closeCoordinator(broker, err) - return nil, err - } - - for topic, partitions := range subs { - for _, partition := range partitions { - block := resp.GetBlock(topic, partition) - if block == nil { - return nil, sarama.ErrIncompleteResponse - } - - if block.Err == sarama.ErrNoError { - offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata} - } else { - return nil, block.Err - } - } - } - return offsets, nil -} - -// Send a request to the broker to leave the group on failed rebalance() and on Close() -func (c *Consumer) leaveGroup() error { - broker, err := c.client.Coordinator(c.groupID) - if err != nil { - c.closeCoordinator(broker, err) - return err - } - - memberID, _ := c.membership() - if _, err = broker.LeaveGroup(&sarama.LeaveGroupRequest{ - GroupId: c.groupID, - MemberId: memberID, - }); err != nil { - c.closeCoordinator(broker, err) - } - return err -} - -// -------------------------------------------------------------------- - -func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error { - memberID, _ := c.membership() - sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial)) - - // Create partitionConsumer - pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial) - if err != nil { - return err - } - - // Store in subscriptions - c.subs.Store(topic, partition, pc) - - // Start partition consumer goroutine -
tomb.Go(func(stopper <-chan none) { - if c.client.config.Group.Mode == ConsumerModePartitions { - pc.WaitFor(stopper, c.errors) - } else { - pc.Multiplex(stopper, c.messages, c.errors) - } - }) - - if c.client.config.Group.Mode == ConsumerModePartitions { - c.partitions <- pc - } - return nil -} - -func (c *Consumer) commitOffsetsWithRetry(retries int) error { - err := c.CommitOffsets() - if err != nil && retries > 0 { - return c.commitOffsetsWithRetry(retries - 1) - } - return err -} - -func (c *Consumer) closeCoordinator(broker *sarama.Broker, err error) { - if broker != nil { - _ = broker.Close() - } - - switch err { - case sarama.ErrConsumerCoordinatorNotAvailable, sarama.ErrNotCoordinatorForConsumer: - _ = c.client.RefreshCoordinator(c.groupID) - } -} - -func (c *Consumer) selectExtraTopics(allTopics []string) []string { - extra := allTopics[:0] - for _, topic := range allTopics { - if !c.isKnownCoreTopic(topic) && c.isPotentialExtraTopic(topic) { - extra = append(extra, topic) - } - } - return extra -} - -func (c *Consumer) isKnownCoreTopic(topic string) bool { - pos := sort.SearchStrings(c.coreTopics, topic) - return pos < len(c.coreTopics) && c.coreTopics[pos] == topic -} - -func (c *Consumer) isKnownExtraTopic(topic string) bool { - pos := sort.SearchStrings(c.extraTopics, topic) - return pos < len(c.extraTopics) && c.extraTopics[pos] == topic -} - -func (c *Consumer) isPotentialExtraTopic(topic string) bool { - rx := c.client.config.Group.Topics - if rx.Blacklist != nil && rx.Blacklist.MatchString(topic) { - return false - } - if rx.Whitelist != nil && rx.Whitelist.MatchString(topic) { - return true - } - return false -} - -func (c *Consumer) refreshCoordinator() error { - if err := c.refreshMetadata(); err != nil { - return err - } - return c.client.RefreshCoordinator(c.groupID) -} - -func (c *Consumer) refreshMetadata() (err error) { - if c.client.config.Metadata.Full { - err = c.client.RefreshMetadata() - } else { - var topics []string - if topics, err = c.client.Topics(); err == nil && len(topics) != 0 { - err = c.client.RefreshMetadata(topics...) - } - } - - // maybe we didn't have authorization to describe all topics - switch err { - case sarama.ErrTopicAuthorizationFailed: - err = c.client.RefreshMetadata(c.coreTopics...) - } - return -} - -func (c *Consumer) membership() (memberID string, generationID int32) { - c.membershipMu.RLock() - memberID, generationID = c.memberID, c.generationID - c.membershipMu.RUnlock() - return -} diff --git a/vendor/github.com/bsm/sarama-cluster/doc.go b/vendor/github.com/bsm/sarama-cluster/doc.go deleted file mode 100644 index 9c8ff16a77..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package cluster provides cluster extensions for Sarama, enabling users -to consume topics from multiple, balanced nodes.
- -It requires Kafka v0.9+ and follows the design described in: -https://cwiki.apache.org/confluence/display/KAFKA/Kafka+0.9+Consumer+Rewrite+Design -*/ -package cluster diff --git a/vendor/github.com/bsm/sarama-cluster/offsets.go b/vendor/github.com/bsm/sarama-cluster/offsets.go deleted file mode 100644 index b2abe355f2..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/offsets.go +++ /dev/null @@ -1,49 +0,0 @@ -package cluster - -import ( - "sync" - - "github.com/Shopify/sarama" -) - -// OffsetStash allows you to accumulate offsets and -// mark them as processed in bulk -type OffsetStash struct { - offsets map[topicPartition]offsetInfo - mu sync.Mutex -} - -// NewOffsetStash inits a blank stash -func NewOffsetStash() *OffsetStash { - return &OffsetStash{offsets: make(map[topicPartition]offsetInfo)} -} - -// MarkOffset stashes the provided message offset -func (s *OffsetStash) MarkOffset(msg *sarama.ConsumerMessage, metadata string) { - s.MarkPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata) -} - -// MarkPartitionOffset stashes the offset for the provided topic/partition combination -func (s *OffsetStash) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) { - s.mu.Lock() - defer s.mu.Unlock() - - key := topicPartition{Topic: topic, Partition: partition} - if info := s.offsets[key]; offset >= info.Offset { - info.Offset = offset - info.Metadata = metadata - s.offsets[key] = info - } -} - -// Offsets returns the latest stashed offsets by topic-partition -func (s *OffsetStash) Offsets() map[string]int64 { - s.mu.Lock() - defer s.mu.Unlock() - - res := make(map[string]int64, len(s.offsets)) - for tp, info := range s.offsets { - res[tp.String()] = info.Offset - } - return res -} diff --git a/vendor/github.com/bsm/sarama-cluster/partitions.go b/vendor/github.com/bsm/sarama-cluster/partitions.go deleted file mode 100644 index d5f59af4ea..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/partitions.go +++ /dev/null @@ -1,277 +0,0 @@ -package cluster - -import ( - "sort" - "sync" - "time" - - "github.com/Shopify/sarama" -) - -// PartitionConsumer allows code to consume individual partitions from the cluster. - -// -// See docs for Consumer.Partitions() for more on how to implement this. -type PartitionConsumer interface { - - // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown, drain - // the Messages channel, harvest any errors & return them to the caller and trigger a rebalance. - Close() error - - // Messages returns the read channel for the messages that are returned by - // the broker. - Messages() <-chan *sarama.ConsumerMessage - - // HighWaterMarkOffset returns the high water mark offset of the partition, - // i.e. the offset that will be used for the next message that will be produced. - // You can use this to determine how far behind the processing is.
- HighWaterMarkOffset() int64 - - // Topic returns the consumed topic name - Topic() string - - // Partition returns the consumed partition - Partition() int32 -} - -type partitionConsumer struct { - sarama.PartitionConsumer - - state partitionState - mu sync.Mutex - - topic string - partition int32 - - once sync.Once - dying, dead chan none -} - -func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) { - pcm, err := manager.ConsumePartition(topic, partition, info.NextOffset(defaultOffset)) - - // Resume from default offset, if requested offset is out-of-range - if err == sarama.ErrOffsetOutOfRange { - info.Offset = -1 - pcm, err = manager.ConsumePartition(topic, partition, defaultOffset) - } - if err != nil { - return nil, err - } - - return &partitionConsumer{ - PartitionConsumer: pcm, - state: partitionState{Info: info}, - - topic: topic, - partition: partition, - - dying: make(chan none), - dead: make(chan none), - }, nil -} - -// Topic implements PartitionConsumer -func (c *partitionConsumer) Topic() string { return c.topic } - -// Partition implements PartitionConsumer -func (c *partitionConsumer) Partition() int32 { return c.partition } - -func (c *partitionConsumer) WaitFor(stopper <-chan none, errors chan<- error) { - defer close(c.dead) - - for { - select { - case err, ok := <-c.Errors(): - if !ok { - return - } - select { - case errors <- err: - case <-stopper: - return - case <-c.dying: - return - } - case <-stopper: - return - case <-c.dying: - return - } - } -} - -func (c *partitionConsumer) Multiplex(stopper <-chan none, messages chan<- *sarama.ConsumerMessage, errors chan<- error) { - defer close(c.dead) - - for { - select { - case msg, ok := <-c.Messages(): - if !ok { - return - } - select { - case messages <- msg: - case <-stopper: - return - case <-c.dying: - return - } - case err, ok := <-c.Errors(): - if !ok { - return - } - select { - case errors <- err: - case <-stopper: - return - case <-c.dying: - return - } - case <-stopper: - return - case <-c.dying: - return - } - } -} - -func (c *partitionConsumer) Close() (err error) { - c.once.Do(func() { - err = c.PartitionConsumer.Close() - close(c.dying) - }) - <-c.dead - return err -} - -func (c *partitionConsumer) State() partitionState { - if c == nil { - return partitionState{} - } - - c.mu.Lock() - state := c.state - c.mu.Unlock() - - return state -} - -func (c *partitionConsumer) MarkCommitted(offset int64) { - if c == nil { - return - } - - c.mu.Lock() - if offset == c.state.Info.Offset { - c.state.Dirty = false - } - c.mu.Unlock() -} - -func (c *partitionConsumer) MarkOffset(offset int64, metadata string) { - if c == nil { - return - } - - c.mu.Lock() - if offset > c.state.Info.Offset { - c.state.Info.Offset = offset - c.state.Info.Metadata = metadata - c.state.Dirty = true - } - c.mu.Unlock() -} - -// -------------------------------------------------------------------- - -type partitionState struct { - Info offsetInfo - Dirty bool - LastCommit time.Time -} - -// -------------------------------------------------------------------- - -type partitionMap struct { - data map[topicPartition]*partitionConsumer - mu sync.RWMutex -} - -func newPartitionMap() *partitionMap { - return &partitionMap{ - data: make(map[topicPartition]*partitionConsumer), - } -} - -func (m *partitionMap) IsSubscribedTo(topic string) bool { - m.mu.RLock() - defer m.mu.RUnlock() - - for tp := range m.data { - if tp.Topic == topic { - return true - } - } - 
return false -} - -func (m *partitionMap) Fetch(topic string, partition int32) *partitionConsumer { - m.mu.RLock() - pc, _ := m.data[topicPartition{topic, partition}] - m.mu.RUnlock() - return pc -} - -func (m *partitionMap) Store(topic string, partition int32, pc *partitionConsumer) { - m.mu.Lock() - m.data[topicPartition{topic, partition}] = pc - m.mu.Unlock() -} - -func (m *partitionMap) Snapshot() map[topicPartition]partitionState { - m.mu.RLock() - defer m.mu.RUnlock() - - snap := make(map[topicPartition]partitionState, len(m.data)) - for tp, pc := range m.data { - snap[tp] = pc.State() - } - return snap -} - -func (m *partitionMap) Stop() { - m.mu.RLock() - defer m.mu.RUnlock() - - var wg sync.WaitGroup - for tp := range m.data { - wg.Add(1) - go func(p *partitionConsumer) { - _ = p.Close() - wg.Done() - }(m.data[tp]) - } - wg.Wait() -} - -func (m *partitionMap) Clear() { - m.mu.Lock() - for tp := range m.data { - delete(m.data, tp) - } - m.mu.Unlock() -} - -func (m *partitionMap) Info() map[string][]int32 { - info := make(map[string][]int32) - m.mu.RLock() - for tp := range m.data { - info[tp.Topic] = append(info[tp.Topic], tp.Partition) - } - m.mu.RUnlock() - - for topic := range info { - sort.Sort(int32Slice(info[topic])) - } - return info -} diff --git a/vendor/github.com/bsm/sarama-cluster/util.go b/vendor/github.com/bsm/sarama-cluster/util.go deleted file mode 100644 index e7cb5dd1b8..0000000000 --- a/vendor/github.com/bsm/sarama-cluster/util.go +++ /dev/null @@ -1,75 +0,0 @@ -package cluster - -import ( - "fmt" - "sort" - "sync" -) - -type none struct{} - -type topicPartition struct { - Topic string - Partition int32 -} - -func (tp *topicPartition) String() string { - return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition) -} - -type offsetInfo struct { - Offset int64 - Metadata string -} - -func (i offsetInfo) NextOffset(fallback int64) int64 { - if i.Offset > -1 { - return i.Offset - } - return fallback -} - -type int32Slice []int32 - -func (p int32Slice) Len() int { return len(p) } -func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p int32Slice) Diff(o int32Slice) (res []int32) { - on := len(o) - for _, x := range p { - n := sort.Search(on, func(i int) bool { return o[i] >= x }) - if n < on && o[n] == x { - continue - } - res = append(res, x) - } - return -} - -// -------------------------------------------------------------------- - -type loopTomb struct { - c chan none - o sync.Once - w sync.WaitGroup -} - -func newLoopTomb() *loopTomb { - return &loopTomb{c: make(chan none)} -} - -func (t *loopTomb) stop() { t.o.Do(func() { close(t.c) }) } -func (t *loopTomb) Close() { t.stop(); t.w.Wait() } - -func (t *loopTomb) Dying() <-chan none { return t.c } -func (t *loopTomb) Go(f func(<-chan none)) { - t.w.Add(1) - - go func() { - defer t.stop() - defer t.w.Done() - - f(t.c) - }() -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index c836416192..0000000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. 
- -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 8a4a6589a2..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. They are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) -) - -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions).
Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } - - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) - if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } - } -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types.
- switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } - } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 1fe3cf3d5d..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 7c519ff47a..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("<nil>") - maxNewlineBytes = []byte("<max depth reached>\n") - maxShortBytes = []byte("<max>") - circularBytes = []byte("<already shown>") - circularShortBytes = []byte("<shown>") - invalidAngleBytes = []byte("<invalid>") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer?
- switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f312..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1e..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. 
- - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) <nil> - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf.
The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*><shown>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index df1d582a72..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option.
-func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound == true: - d.w.Write(nilAngleBytes) - - case cycleFound == true: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. 
We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index c49875bacb..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound == true: - f.fs.Write(nilAngleBytes) - - case cycleFound == true: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf.
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33882..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details.
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. 
-func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE deleted file mode 100644 index 698a3f5139..0000000000 --- a/vendor/github.com/eapache/go-resiliency/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md deleted file mode 100644 index 7262bfc282..0000000000 --- a/vendor/github.com/eapache/go-resiliency/breaker/README.md +++ /dev/null @@ -1,33 +0,0 @@ -circuit-breaker -=============== - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) - -The circuit-breaker resiliency pattern for golang. - -Creating a breaker takes three parameters: -- error threshold (for opening the breaker) -- success threshold (for closing the breaker) -- timeout (how long to keep the breaker open) - -```go -b := breaker.New(3, 1, 5*time.Second) - -for { - result := b.Run(func() error { - // communicate with some external service and - // return an error if the communication failed - return nil - }) - - switch result { - case nil: - // success! - case breaker.ErrBreakerOpen: - // our function wasn't run because the breaker was open - default: - // some other error - } -} -``` diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go deleted file mode 100644 index f88ca7248b..0000000000 --- a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go +++ /dev/null @@ -1,161 +0,0 @@ -// Package breaker implements the circuit-breaker resiliency pattern for Go. -package breaker - -import ( - "errors" - "sync" - "sync/atomic" - "time" -) - -// ErrBreakerOpen is the error returned from Run() when the function is not executed -// because the breaker is currently open. 
-var ErrBreakerOpen = errors.New("circuit breaker is open") - -const ( - closed uint32 = iota - open - halfOpen -) - -// Breaker implements the circuit-breaker resiliency pattern -type Breaker struct { - errorThreshold, successThreshold int - timeout time.Duration - - lock sync.Mutex - state uint32 - errors, successes int - lastError time.Time -} - -// New constructs a new circuit-breaker that starts closed. -// From closed, the breaker opens if "errorThreshold" errors are seen -// without an error-free period of at least "timeout". From open, the -// breaker half-closes after "timeout". From half-open, the breaker closes -// after "successThreshold" consecutive successes, or opens on a single error. -func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { - return &Breaker{ - errorThreshold: errorThreshold, - successThreshold: successThreshold, - timeout: timeout, - } -} - -// Run will either return ErrBreakerOpen immediately if the circuit-breaker is -// already open, or it will run the given function and pass along its return -// value. It is safe to call Run concurrently on the same Breaker. -func (b *Breaker) Run(work func() error) error { - state := atomic.LoadUint32(&b.state) - - if state == open { - return ErrBreakerOpen - } - - return b.doWork(state, work) -} - -// Go will either return ErrBreakerOpen immediately if the circuit-breaker is -// already open, or it will run the given function in a separate goroutine. -// If the function is run, Go will return nil immediately, and will *not* return -// the return value of the function. It is safe to call Go concurrently on the -// same Breaker. -func (b *Breaker) Go(work func() error) error { - state := atomic.LoadUint32(&b.state) - - if state == open { - return ErrBreakerOpen - } - - // errcheck complains about ignoring the error return value, but - // that's on purpose; if you want an error from a goroutine you have to - // get it over a channel or something - go b.doWork(state, work) - - return nil -} - -func (b *Breaker) doWork(state uint32, work func() error) error { - var panicValue interface{} - - result := func() error { - defer func() { - panicValue = recover() - }() - return work() - }() - - if result == nil && panicValue == nil && state == closed { - // short-circuit the normal, success path without contending - // on the lock - return nil - } - - // oh well, I guess we have to contend on the lock - b.processResult(result, panicValue) - - if panicValue != nil { - // as close as Go lets us come to a "rethrow" although unfortunately - // we lose the original panicking location - panic(panicValue) - } - - return result -} - -func (b *Breaker) processResult(result error, panicValue interface{}) { - b.lock.Lock() - defer b.lock.Unlock() - - if result == nil && panicValue == nil { - if b.state == halfOpen { - b.successes++ - if b.successes == b.successThreshold { - b.closeBreaker() - } - } - } else { - if b.errors > 0 { - expiry := b.lastError.Add(b.timeout) - if time.Now().After(expiry) { - b.errors = 0 - } - } - - switch b.state { - case closed: - b.errors++ - if b.errors == b.errorThreshold { - b.openBreaker() - } else { - b.lastError = time.Now() - } - case halfOpen: - b.openBreaker() - } - } -} - -func (b *Breaker) openBreaker() { - b.changeState(open) - go b.timer() -} - -func (b *Breaker) closeBreaker() { - b.changeState(closed) -} - -func (b *Breaker) timer() { - time.Sleep(b.timeout) - - b.lock.Lock() - defer b.lock.Unlock() - - b.changeState(halfOpen) -} - -func (b *Breaker) changeState(newState
uint32) { - b.errors = 0 - b.successes = 0 - atomic.StoreUint32(&b.state, newState) -} diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml deleted file mode 100644 index d6cf4f1fa1..0000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: -- 1.5.4 -- 1.6.1 - -sudo: false diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE deleted file mode 100644 index 5bf3688d9e..0000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md deleted file mode 100644 index 3f2695c728..0000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# go-xerial-snappy - -[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) - -Xerial-compatible Snappy framing support for golang. - -Packages using Xerial for snappy encoding use a framing format incompatible with -basically everything else in existence. This package wraps Go's built-in snappy -package to support it. - -Apps that use this format include Apache Kafka (see -https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for -details). 
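As context for this removal: the whole surface of the vendored package is the `Encode`/`Decode` pair defined in the `snappy.go` hunk that follows. A minimal sketch of a round trip through that API, assuming only the vendored import path above and the signatures visible in the deleted file (the sample payload is illustrative):

```go
package main

import (
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	// Encode produces a raw (unframed) snappy block.
	raw := snappy.Encode([]byte("hello, kafka"))

	// Decode handles both raw blocks and Xerial-framed streams: it
	// checks for the 8-byte magic header and picks the right path.
	out, err := snappy.Decode(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, kafka
}
```

The header check is why one `Decode` call can serve both framed Kafka payloads and ordinary snappy data, as the file below shows.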
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go deleted file mode 100644 index b8f8b51fce..0000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go +++ /dev/null @@ -1,43 +0,0 @@ -package snappy - -import ( - "bytes" - "encoding/binary" - - master "github.com/golang/snappy" -) - -var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} - -// Encode encodes data as snappy with no framing header. -func Encode(src []byte) []byte { - return master.Encode(nil, src) -} - -// Decode decodes snappy data whether it is traditional unframed -// or includes the xerial framing format. -func Decode(src []byte) ([]byte, error) { - if !bytes.Equal(src[:8], xerialHeader) { - return master.Decode(nil, src) - } - - var ( - pos = uint32(16) - max = uint32(len(src)) - dst = make([]byte, 0, len(src)) - chunk []byte - err error - ) - for pos < max { - size := binary.BigEndian.Uint32(src[pos : pos+4]) - pos += 4 - - chunk, err = master.Decode(chunk, src[pos:pos+size]) - if err != nil { - return nil, err - } - pos += size - dst = append(dst, chunk...) - } - return dst, nil -} diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore deleted file mode 100644 index 836562412f..0000000000 --- a/vendor/github.com/eapache/queue/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml deleted file mode 100644 index 235a40a493..0000000000 --- a/vendor/github.com/eapache/queue/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -sudo: false - -go: - - 1.2 - - 1.3 - - 1.4 diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE deleted file mode 100644 index d5f36dbcaa..0000000000 --- a/vendor/github.com/eapache/queue/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md deleted file mode 100644 index 8e782335cd..0000000000 --- a/vendor/github.com/eapache/queue/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Queue -===== - -[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) -[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is in part because it is *not* thread-safe. - -Follows semantic versioning using https://gopkg.in/ - import from -[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) -for guaranteed API stability. diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go deleted file mode 100644 index 71d1acdf27..0000000000 --- a/vendor/github.com/eapache/queue/queue.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. -*/ -package queue - -// minQueueLen is smallest capacity that queue may have. -// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). -const minQueueLen = 16 - -// Queue represents a single instance of the queue data structure. -type Queue struct { - buf []interface{} - head, tail, count int -} - -// New constructs and returns a new Queue. -func New() *Queue { - return &Queue{ - buf: make([]interface{}, minQueueLen), - } -} - -// Length returns the number of elements currently stored in the queue. -func (q *Queue) Length() int { - return q.count -} - -// resizes the queue to fit exactly twice its current contents -// this can result in shrinking if the queue is less than half-full -func (q *Queue) resize() { - newBuf := make([]interface{}, q.count<<1) - - if q.tail > q.head { - copy(newBuf, q.buf[q.head:q.tail]) - } else { - n := copy(newBuf, q.buf[q.head:]) - copy(newBuf[n:], q.buf[:q.tail]) - } - - q.head = 0 - q.tail = q.count - q.buf = newBuf -} - -// Add puts an element on the end of the queue. -func (q *Queue) Add(elem interface{}) { - if q.count == len(q.buf) { - q.resize() - } - - q.buf[q.tail] = elem - // bitwise modulus - q.tail = (q.tail + 1) & (len(q.buf) - 1) - q.count++ -} - -// Peek returns the element at the head of the queue. This call panics -// if the queue is empty. -func (q *Queue) Peek() interface{} { - if q.count <= 0 { - panic("queue: Peek() called on empty queue") - } - return q.buf[q.head] -} - -// Get returns the element at index i in the queue. If the index is -// invalid, the call will panic. This method accepts both positive and -// negative index values. Index 0 refers to the first element, and -// index -1 refers to the last. -func (q *Queue) Get(i int) interface{} { - // If indexing backwards, convert to positive index. 
- if i < 0 { - i += q.count - } - if i < 0 || i >= q.count { - panic("queue: Get() called with index out of range") - } - // bitwise modulus - return q.buf[(q.head+i)&(len(q.buf)-1)] -} - -// Remove removes and returns the element from the front of the queue. If the -// queue is empty, the call will panic. -func (q *Queue) Remove() interface{} { - if q.count <= 0 { - panic("queue: Remove() called on empty queue") - } - ret := q.buf[q.head] - q.buf[q.head] = nil - // bitwise modulus - q.head = (q.head + 1) & (len(q.buf) - 1) - q.count-- - // Resize down if buffer 1/4 full. - if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { - q.resize() - } - return ret -} diff --git a/vendor/github.com/go-redis/redis/.gitignore b/vendor/github.com/go-redis/redis/.gitignore deleted file mode 100644 index ebfe903bcd..0000000000 --- a/vendor/github.com/go-redis/redis/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.rdb -testdata/*/ diff --git a/vendor/github.com/go-redis/redis/.travis.yml b/vendor/github.com/go-redis/redis/.travis.yml deleted file mode 100644 index c95b3e6c6c..0000000000 --- a/vendor/github.com/go-redis/redis/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go - -services: - - redis-server - -go: - - 1.7.x - - 1.8.x - - 1.9.x - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get github.com/onsi/ginkgo - - go get github.com/onsi/gomega diff --git a/vendor/github.com/go-redis/redis/LICENSE b/vendor/github.com/go-redis/redis/LICENSE deleted file mode 100644 index 298bed9bea..0000000000 --- a/vendor/github.com/go-redis/redis/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2013 The github.com/go-redis/redis Authors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-redis/redis/Makefile b/vendor/github.com/go-redis/redis/Makefile deleted file mode 100644 index 50fdc55a1a..0000000000 --- a/vendor/github.com/go-redis/redis/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -all: testdeps - go test ./... - go test ./... -short -race - go vet - -testdeps: testdata/redis/src/redis-server - -bench: testdeps - go test ./... -test.run=NONE -test.bench=. 
-test.benchmem - -.PHONY: all test testdeps bench - -testdata/redis: - mkdir -p $@ - wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@ - -testdata/redis/src/redis-server: testdata/redis - sed -i 's/libjemalloc.a/libjemalloc.a -lrt/g' $ -} - -func ExampleClient() { - err := client.Set("key", "value", 0).Err() - if err != nil { - panic(err) - } - - val, err := client.Get("key").Result() - if err != nil { - panic(err) - } - fmt.Println("key", val) - - val2, err := client.Get("key2").Result() - if err == redis.Nil { - fmt.Println("key2 does not exist") - } else if err != nil { - panic(err) - } else { - fmt.Println("key2", val2) - } - // Output: key value - // key2 does not exist -} -``` - -## Howto - -Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. - -## Look and feel - -Some corner cases: - - SET key value EX 10 NX - set, err := client.SetNX("key", "value", 10*time.Second).Result() - - SORT list LIMIT 0 2 ASC - vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() - - ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 - vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ - Min: "-inf", - Max: "+inf", - Offset: 0, - Count: 2, - }).Result() - - ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM - vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() - - EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" - vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() - -## Benchmark - -go-redis vs redigo: - -``` -BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op -BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op -BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op -``` - -Redis Cluster: - -``` -BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op -BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op -``` - -## See also - -- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) -- [Golang msgpack](https://github.com/vmihailenco/msgpack) -- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go deleted file mode 100644 index accdb3d272..0000000000 --- a/vendor/github.com/go-redis/redis/cluster.go +++ /dev/null @@ -1,1284 +0,0 @@ -package redis - -import ( - "fmt" - "math/rand" - "net" - "sync" - 
"sync/atomic" - "time" - - "github.com/go-redis/redis/internal" - "github.com/go-redis/redis/internal/hashtag" - "github.com/go-redis/redis/internal/pool" - "github.com/go-redis/redis/internal/proto" -) - -var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") -var errNilClusterState = fmt.Errorf("redis: cannot load cluster slots") - -// ClusterOptions are used to configure a cluster client and should be -// passed to NewClusterClient. -type ClusterOptions struct { - // A seed list of host:port addresses of cluster nodes. - Addrs []string - - // The maximum number of retries before giving up. Command is retried - // on network errors and MOVED/ASK redirects. - // Default is 16. - MaxRedirects int - - // Enables read-only commands on slave nodes. - ReadOnly bool - // Allows routing read-only commands to the closest master or slave node. - RouteByLatency bool - - // Following options are copied from Options struct. - - OnConnect func(*Conn) error - - MaxRetries int - MinRetryBackoff time.Duration - MaxRetryBackoff time.Duration - Password string - - DialTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - - // PoolSize applies per cluster node and not for the whole cluster. - PoolSize int - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration -} - -func (opt *ClusterOptions) init() { - if opt.MaxRedirects == -1 { - opt.MaxRedirects = 0 - } else if opt.MaxRedirects == 0 { - opt.MaxRedirects = 16 - } - - if opt.RouteByLatency { - opt.ReadOnly = true - } - - switch opt.ReadTimeout { - case -1: - opt.ReadTimeout = 0 - case 0: - opt.ReadTimeout = 3 * time.Second - } - switch opt.WriteTimeout { - case -1: - opt.WriteTimeout = 0 - case 0: - opt.WriteTimeout = opt.ReadTimeout - } - - switch opt.MinRetryBackoff { - case -1: - opt.MinRetryBackoff = 0 - case 0: - opt.MinRetryBackoff = 8 * time.Millisecond - } - switch opt.MaxRetryBackoff { - case -1: - opt.MaxRetryBackoff = 0 - case 0: - opt.MaxRetryBackoff = 512 * time.Millisecond - } -} - -func (opt *ClusterOptions) clientOptions() *Options { - const disableIdleCheck = -1 - - return &Options{ - OnConnect: opt.OnConnect, - - MaxRetries: opt.MaxRetries, - MinRetryBackoff: opt.MinRetryBackoff, - MaxRetryBackoff: opt.MaxRetryBackoff, - Password: opt.Password, - readOnly: opt.ReadOnly, - - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, - - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - - IdleCheckFrequency: disableIdleCheck, - } -} - -//------------------------------------------------------------------------------ - -type clusterNode struct { - Client *Client - Latency time.Duration - - loading time.Time - generation uint32 -} - -func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { - opt := clOpt.clientOptions() - opt.Addr = addr - node := clusterNode{ - Client: NewClient(opt), - } - - if clOpt.RouteByLatency { - node.updateLatency() - } - - return &node -} - -func (n *clusterNode) updateLatency() { - const probes = 10 - for i := 0; i < probes; i++ { - start := time.Now() - n.Client.Ping() - n.Latency += time.Since(start) - } - n.Latency = n.Latency / probes -} - -func (n *clusterNode) Loading() bool { - return !n.loading.IsZero() && time.Since(n.loading) < time.Minute -} - -func (n *clusterNode) Generation() uint32 { - return n.generation -} - -func (n *clusterNode) SetGeneration(gen uint32) { - if gen < n.generation { - panic("gen < n.generation") - } - n.generation = gen 
-} - -//------------------------------------------------------------------------------ - -type clusterNodes struct { - opt *ClusterOptions - - mu sync.RWMutex - addrs []string - nodes map[string]*clusterNode - closed bool - - generation uint32 -} - -func newClusterNodes(opt *ClusterOptions) *clusterNodes { - return &clusterNodes{ - opt: opt, - nodes: make(map[string]*clusterNode), - } -} - -func (c *clusterNodes) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil - } - c.closed = true - - var firstErr error - for _, node := range c.nodes { - if err := node.Client.Close(); err != nil && firstErr == nil { - firstErr = err - } - } - c.addrs = nil - c.nodes = nil - - return firstErr -} - -func (c *clusterNodes) Addrs() ([]string, error) { - c.mu.RLock() - closed := c.closed - addrs := c.addrs - c.mu.RUnlock() - - if closed { - return nil, pool.ErrClosed - } - if len(addrs) == 0 { - return nil, errClusterNoNodes - } - return addrs, nil -} - -func (c *clusterNodes) NextGeneration() uint32 { - c.generation++ - return c.generation -} - -// GC removes unused nodes. -func (c *clusterNodes) GC(generation uint32) { - var collected []*clusterNode - c.mu.Lock() - for i := 0; i < len(c.addrs); { - addr := c.addrs[i] - node := c.nodes[addr] - if node.Generation() >= generation { - i++ - continue - } - - c.addrs = append(c.addrs[:i], c.addrs[i+1:]...) - delete(c.nodes, addr) - collected = append(collected, node) - } - c.mu.Unlock() - - time.AfterFunc(time.Minute, func() { - for _, node := range collected { - _ = node.Client.Close() - } - }) -} - -func (c *clusterNodes) All() ([]*clusterNode, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - if c.closed { - return nil, pool.ErrClosed - } - - nodes := make([]*clusterNode, 0, len(c.nodes)) - for _, node := range c.nodes { - nodes = append(nodes, node) - } - return nodes, nil -} - -func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { - var node *clusterNode - var ok bool - - c.mu.RLock() - if !c.closed { - node, ok = c.nodes[addr] - } - c.mu.RUnlock() - if ok { - return node, nil - } - - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil, pool.ErrClosed - } - - node, ok = c.nodes[addr] - if ok { - return node, nil - } - - c.addrs = append(c.addrs, addr) - node = newClusterNode(c.opt, addr) - c.nodes[addr] = node - return node, nil -} - -func (c *clusterNodes) Random() (*clusterNode, error) { - addrs, err := c.Addrs() - if err != nil { - return nil, err - } - - var nodeErr error - for i := 0; i <= c.opt.MaxRedirects; i++ { - n := rand.Intn(len(addrs)) - node, err := c.GetOrCreate(addrs[n]) - if err != nil { - return nil, err - } - - nodeErr = node.Client.ClusterInfo().Err() - if nodeErr == nil { - return node, nil - } - } - return nil, nodeErr -} - -//------------------------------------------------------------------------------ - -type clusterState struct { - nodes *clusterNodes - masters []*clusterNode - slaves []*clusterNode - - slots [][]*clusterNode - - generation uint32 -} - -func newClusterState(nodes *clusterNodes, slots []ClusterSlot, origin string) (*clusterState, error) { - c := clusterState{ - nodes: nodes, - generation: nodes.NextGeneration(), - - slots: make([][]*clusterNode, hashtag.SlotNumber), - } - - isLoopbackOrigin := isLoopbackAddr(origin) - for _, slot := range slots { - var nodes []*clusterNode - for i, slotNode := range slot.Nodes { - addr := slotNode.Addr - if !isLoopbackOrigin && isLoopbackAddr(addr) { - addr = origin - } - - node, err := c.nodes.GetOrCreate(addr) - if 
err != nil { - return nil, err - } - - node.SetGeneration(c.generation) - nodes = append(nodes, node) - - if i == 0 { - c.masters = appendNode(c.masters, node) - } else { - c.slaves = appendNode(c.slaves, node) - } - } - - for i := slot.Start; i <= slot.End; i++ { - c.slots[i] = nodes - } - } - - return &c, nil -} - -func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - if len(nodes) > 0 { - return nodes[0], nil - } - return c.nodes.Random() -} - -func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - switch len(nodes) { - case 0: - return c.nodes.Random() - case 1: - return nodes[0], nil - case 2: - if slave := nodes[1]; !slave.Loading() { - return slave, nil - } - return nodes[0], nil - default: - var slave *clusterNode - for i := 0; i < 10; i++ { - n := rand.Intn(len(nodes)-1) + 1 - slave = nodes[n] - if !slave.Loading() { - break - } - } - return slave, nil - } -} - -func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { - const threshold = time.Millisecond - - nodes := c.slotNodes(slot) - if len(nodes) == 0 { - return c.nodes.Random() - } - - var node *clusterNode - for _, n := range nodes { - if n.Loading() { - continue - } - if node == nil || node.Latency-n.Latency > threshold { - node = n - } - } - return node, nil -} - -func (c *clusterState) slotNodes(slot int) []*clusterNode { - if slot >= 0 && slot < len(c.slots) { - return c.slots[slot] - } - return nil -} - -//------------------------------------------------------------------------------ - -// ClusterClient is a Redis Cluster client representing a pool of zero -// or more underlying connections. It's safe for concurrent use by -// multiple goroutines. -type ClusterClient struct { - cmdable - - opt *ClusterOptions - nodes *clusterNodes - _state atomic.Value - - cmdsInfoOnce internal.Once - cmdsInfo map[string]*CommandInfo - - // Reports whether slots reloading is in progress. - reloading uint32 -} - -// NewClusterClient returns a Redis Cluster client as described in -// http://redis.io/topics/cluster-spec. -func NewClusterClient(opt *ClusterOptions) *ClusterClient { - opt.init() - - c := &ClusterClient{ - opt: opt, - nodes: newClusterNodes(opt), - } - c.setProcessor(c.Process) - - // Add initial nodes. - for _, addr := range opt.Addrs { - _, _ = c.nodes.GetOrCreate(addr) - } - - // Preload cluster slots. - for i := 0; i < 10; i++ { - state, err := c.reloadState() - if err == nil { - c._state.Store(state) - break - } - } - - if opt.IdleCheckFrequency > 0 { - go c.reaper(opt.IdleCheckFrequency) - } - - return c -} - -// Options returns read-only Options that were used to create the client. 
-func (c *ClusterClient) Options() *ClusterOptions { - return c.opt -} - -func (c *ClusterClient) retryBackoff(attempt int) time.Duration { - return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) -} - -func (c *ClusterClient) state() (*clusterState, error) { - v := c._state.Load() - if v != nil { - return v.(*clusterState), nil - } - - _, err := c.nodes.Addrs() - if err != nil { - return nil, err - } - - c.lazyReloadState() - return nil, errNilClusterState -} - -func (c *ClusterClient) cmdInfo(name string) *CommandInfo { - err := c.cmdsInfoOnce.Do(func() error { - node, err := c.nodes.Random() - if err != nil { - return err - } - - cmdsInfo, err := node.Client.Command().Result() - if err != nil { - return err - } - - c.cmdsInfo = cmdsInfo - return nil - }) - if err != nil { - return nil - } - info := c.cmdsInfo[name] - if info == nil { - internal.Logf("info for cmd=%s not found", name) - } - return info -} - -func cmdSlot(cmd Cmder, pos int) int { - if pos == 0 { - return hashtag.RandomSlot() - } - firstKey := cmd.stringArg(pos) - return hashtag.Slot(firstKey) -} - -func (c *ClusterClient) cmdSlot(cmd Cmder) int { - cmdInfo := c.cmdInfo(cmd.Name()) - return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) -} - -func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *clusterNode, error) { - cmdInfo := c.cmdInfo(cmd.Name()) - slot := cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) - - if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly { - if c.opt.RouteByLatency { - node, err := state.slotClosestNode(slot) - return slot, node, err - } - - node, err := state.slotSlaveNode(slot) - return slot, node, err - } - - node, err := state.slotMasterNode(slot) - return slot, node, err -} - -func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error { - if len(keys) == 0 { - return fmt.Errorf("redis: keys don't hash to the same slot") - } - - state, err := c.state() - if err != nil { - return err - } - - slot := hashtag.Slot(keys[0]) - for _, key := range keys[1:] { - if hashtag.Slot(key) != slot { - return fmt.Errorf("redis: Watch requires all keys to be in the same slot") - } - } - - node, err := state.slotMasterNode(slot) - if err != nil { - return err - } - - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } - - err = node.Client.Watch(fn, keys...) - if err == nil { - break - } - - if internal.IsRetryableError(err, true) { - continue - } - - moved, ask, addr := internal.IsMovedError(err) - if moved || ask { - c.lazyReloadState() - node, err = c.nodes.GetOrCreate(addr) - if err != nil { - return err - } - continue - } - - if err == pool.ErrClosed { - node, err = state.slotMasterNode(slot) - if err != nil { - return err - } - } - - return err - } - - return err -} - -// Close closes the cluster client, releasing any open resources. -// -// It is rare to Close a ClusterClient, as the ClusterClient is meant -// to be long-lived and shared between many goroutines. 
-func (c *ClusterClient) Close() error { - return c.nodes.Close() -} - -func (c *ClusterClient) Process(cmd Cmder) error { - state, err := c.state() - if err != nil { - cmd.setErr(err) - return err - } - - _, node, err := c.cmdSlotAndNode(state, cmd) - if err != nil { - cmd.setErr(err) - return err - } - - var ask bool - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } - - if ask { - pipe := node.Client.Pipeline() - _ = pipe.Process(NewCmd("ASKING")) - _ = pipe.Process(cmd) - _, err = pipe.Exec() - _ = pipe.Close() - ask = false - } else { - err = node.Client.Process(cmd) - } - - // If there is no error - we are done. - if err == nil { - break - } - - // If slave is loading - read from master. - if c.opt.ReadOnly && internal.IsLoadingError(err) { - // TODO: race - node.loading = time.Now() - continue - } - - if internal.IsRetryableError(err, true) { - var nodeErr error - node, nodeErr = c.nodes.Random() - if nodeErr != nil { - break - } - continue - } - - var moved bool - var addr string - moved, ask, addr = internal.IsMovedError(err) - if moved || ask { - c.lazyReloadState() - - var nodeErr error - node, nodeErr = c.nodes.GetOrCreate(addr) - if nodeErr != nil { - break - } - continue - } - - if err == pool.ErrClosed { - _, node, err = c.cmdSlotAndNode(state, cmd) - if err != nil { - cmd.setErr(err) - return err - } - } - - break - } - - return cmd.Err() -} - -// ForEachMaster concurrently calls the fn on each master node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error { - state, err := c.state() - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - for _, master := range state.masters { - wg.Add(1) - go func(node *clusterNode) { - defer wg.Done() - err := fn(node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - }(master) - } - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// ForEachSlave concurrently calls the fn on each slave node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error { - state, err := c.state() - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - for _, slave := range state.slaves { - wg.Add(1) - go func(node *clusterNode) { - defer wg.Done() - err := fn(node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - }(slave) - } - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// ForEachNode concurrently calls the fn on each known node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error { - state, err := c.state() - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - worker := func(node *clusterNode) { - defer wg.Done() - err := fn(node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - } - - for _, node := range state.masters { - wg.Add(1) - go worker(node) - } - for _, node := range state.slaves { - wg.Add(1) - go worker(node) - } - - wg.Wait() - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// PoolStats returns accumulated connection pool stats. 
-func (c *ClusterClient) PoolStats() *PoolStats { - var acc PoolStats - - state, _ := c.state() - if state == nil { - return &acc - } - - for _, node := range state.masters { - s := node.Client.connPool.Stats() - acc.Hits += s.Hits - acc.Misses += s.Misses - acc.Timeouts += s.Timeouts - - acc.TotalConns += s.TotalConns - acc.FreeConns += s.FreeConns - acc.StaleConns += s.StaleConns - } - - for _, node := range state.slaves { - s := node.Client.connPool.Stats() - acc.Hits += s.Hits - acc.Misses += s.Misses - acc.Timeouts += s.Timeouts - - acc.TotalConns += s.TotalConns - acc.FreeConns += s.FreeConns - acc.StaleConns += s.StaleConns - } - - return &acc -} - -func (c *ClusterClient) lazyReloadState() { - if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { - return - } - - go func() { - defer atomic.StoreUint32(&c.reloading, 0) - - for { - state, err := c.reloadState() - if err == pool.ErrClosed { - return - } - - if err != nil { - time.Sleep(time.Millisecond) - continue - } - - c._state.Store(state) - time.Sleep(5 * time.Second) - c.nodes.GC(state.generation) - break - } - }() -} - -// Not thread-safe. -func (c *ClusterClient) reloadState() (*clusterState, error) { - node, err := c.nodes.Random() - if err != nil { - return nil, err - } - - slots, err := node.Client.ClusterSlots().Result() - if err != nil { - return nil, err - } - - return newClusterState(c.nodes, slots, node.Client.opt.Addr) -} - -// reaper closes idle connections to the cluster. -func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { - ticker := time.NewTicker(idleCheckFrequency) - defer ticker.Stop() - - for range ticker.C { - nodes, err := c.nodes.All() - if err != nil { - break - } - - for _, node := range nodes { - _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() - if err != nil { - internal.Logf("ReapStaleConns failed: %s", err) - } - } - } -} - -func (c *ClusterClient) Pipeline() Pipeliner { - pipe := Pipeline{ - exec: c.pipelineExec, - } - pipe.setProcessor(pipe.Process) - return &pipe -} - -func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(fn) -} - -func (c *ClusterClient) pipelineExec(cmds []Cmder) error { - cmdsMap, err := c.mapCmdsByNode(cmds) - if err != nil { - setCmdsErr(cmds, err) - return err - } - - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } - - failedCmds := make(map[*clusterNode][]Cmder) - - for node, cmds := range cmdsMap { - cn, _, err := node.Client.getConn() - if err != nil { - if err == pool.ErrClosed { - c.remapCmds(cmds, failedCmds) - } else { - setCmdsErr(cmds, err) - } - continue - } - - err = c.pipelineProcessCmds(node, cn, cmds, failedCmds) - if err == nil || internal.IsRedisError(err) { - _ = node.Client.connPool.Put(cn) - } else { - _ = node.Client.connPool.Remove(cn) - } - } - - if len(failedCmds) == 0 { - break - } - cmdsMap = failedCmds - } - - return firstCmdsErr(cmds) -} - -func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) { - state, err := c.state() - if err != nil { - setCmdsErr(cmds, err) - return nil, err - } - - cmdsMap := make(map[*clusterNode][]Cmder) - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - node, err := state.slotMasterNode(slot) - if err != nil { - return nil, err - } - cmdsMap[node] = append(cmdsMap[node], cmd) - } - return cmdsMap, nil -} - -func (c *ClusterClient) remapCmds(cmds []Cmder, failedCmds map[*clusterNode][]Cmder) { - remappedCmds, err := 
c.mapCmdsByNode(cmds) - if err != nil { - setCmdsErr(cmds, err) - return - } - - for node, cmds := range remappedCmds { - failedCmds[node] = cmds - } -} - -func (c *ClusterClient) pipelineProcessCmds( - node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, -) error { - cn.SetWriteTimeout(c.opt.WriteTimeout) - if err := writeCmd(cn, cmds...); err != nil { - setCmdsErr(cmds, err) - failedCmds[node] = cmds - return err - } - - // Set read timeout for all commands. - cn.SetReadTimeout(c.opt.ReadTimeout) - - return c.pipelineReadCmds(cn, cmds, failedCmds) -} - -func (c *ClusterClient) pipelineReadCmds( - cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, -) error { - for _, cmd := range cmds { - err := cmd.readReply(cn) - if err == nil { - continue - } - - if c.checkMovedErr(cmd, err, failedCmds) { - continue - } - - if internal.IsRedisError(err) { - continue - } - - return err - } - return nil -} - -func (c *ClusterClient) checkMovedErr( - cmd Cmder, err error, failedCmds map[*clusterNode][]Cmder, -) bool { - moved, ask, addr := internal.IsMovedError(err) - - if moved { - c.lazyReloadState() - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return false - } - - failedCmds[node] = append(failedCmds[node], cmd) - return true - } - - if ask { - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return false - } - - failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd) - return true - } - - return false -} - -// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. -func (c *ClusterClient) TxPipeline() Pipeliner { - pipe := Pipeline{ - exec: c.txPipelineExec, - } - pipe.setProcessor(pipe.Process) - return &pipe -} - -func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.TxPipeline().Pipelined(fn) -} - -func (c *ClusterClient) txPipelineExec(cmds []Cmder) error { - state, err := c.state() - if err != nil { - return err - } - - cmdsMap := c.mapCmdsBySlot(cmds) - for slot, cmds := range cmdsMap { - node, err := state.slotMasterNode(slot) - if err != nil { - setCmdsErr(cmds, err) - continue - } - cmdsMap := map[*clusterNode][]Cmder{node: cmds} - - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } - - failedCmds := make(map[*clusterNode][]Cmder) - - for node, cmds := range cmdsMap { - cn, _, err := node.Client.getConn() - if err != nil { - if err == pool.ErrClosed { - c.remapCmds(cmds, failedCmds) - } else { - setCmdsErr(cmds, err) - } - continue - } - - err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds) - if err == nil || internal.IsRedisError(err) { - _ = node.Client.connPool.Put(cn) - } else { - _ = node.Client.connPool.Remove(cn) - } - } - - if len(failedCmds) == 0 { - break - } - cmdsMap = failedCmds - } - } - - return firstCmdsErr(cmds) -} - -func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { - cmdsMap := make(map[int][]Cmder) - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - cmdsMap[slot] = append(cmdsMap[slot], cmd) - } - return cmdsMap -} - -func (c *ClusterClient) txPipelineProcessCmds( - node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, -) error { - cn.SetWriteTimeout(c.opt.WriteTimeout) - if err := txPipelineWriteMulti(cn, cmds); err != nil { - setCmdsErr(cmds, err) - failedCmds[node] = cmds - return err - } - - // Set read timeout for all commands. 
- cn.SetReadTimeout(c.opt.ReadTimeout) - - if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil { - setCmdsErr(cmds, err) - return err - } - - return pipelineReadCmds(cn, cmds) -} - -func (c *ClusterClient) txPipelineReadQueued( - cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, -) error { - // Parse queued replies. - var statusCmd StatusCmd - if err := statusCmd.readReply(cn); err != nil { - return err - } - - for _, cmd := range cmds { - err := statusCmd.readReply(cn) - if err == nil { - continue - } - - if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) { - continue - } - - return err - } - - // Parse number of replies. - line, err := cn.Rd.ReadLine() - if err != nil { - if err == Nil { - err = TxFailedErr - } - return err - } - - switch line[0] { - case proto.ErrorReply: - err := proto.ParseErrorReply(line) - for _, cmd := range cmds { - if !c.checkMovedErr(cmd, err, failedCmds) { - break - } - } - return err - case proto.ArrayReply: - // ok - default: - err := fmt.Errorf("redis: expected '*', but got line %q", line) - return err - } - - return nil -} - -func (c *ClusterClient) pubSub(channels []string) *PubSub { - opt := c.opt.clientOptions() - - var node *clusterNode - return &PubSub{ - opt: opt, - - newConn: func(channels []string) (*pool.Conn, error) { - if node == nil { - var slot int - if len(channels) > 0 { - slot = hashtag.Slot(channels[0]) - } else { - slot = -1 - } - - state, err := c.state() - if err != nil { - return nil, err - } - - masterNode, err := state.slotMasterNode(slot) - if err != nil { - return nil, err - } - node = masterNode - } - return node.Client.newConn() - }, - closeConn: func(cn *pool.Conn) error { - return node.Client.connPool.CloseConn(cn) - }, - } -} - -// Subscribe subscribes the client to the specified channels. -// Channels can be omitted to create empty subscription. -func (c *ClusterClient) Subscribe(channels ...string) *PubSub { - pubsub := c.pubSub(channels) - if len(channels) > 0 { - _ = pubsub.Subscribe(channels...) - } - return pubsub -} - -// PSubscribe subscribes the client to the given patterns. -// Patterns can be omitted to create empty subscription. -func (c *ClusterClient) PSubscribe(channels ...string) *PubSub { - pubsub := c.pubSub(channels) - if len(channels) > 0 { - _ = pubsub.PSubscribe(channels...) 
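-		// The initial subscribe error is deliberately dropped; failures surface when the PubSub connection is used.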
- } - return pubsub -} - -func isLoopbackAddr(addr string) bool { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return false - } - - ip := net.ParseIP(host) - if ip == nil { - return false - } - - return ip.IsLoopback() -} - -func appendNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { - for _, n := range nodes { - if n == node { - return nodes - } - } - return append(nodes, node) -} diff --git a/vendor/github.com/go-redis/redis/cluster_commands.go b/vendor/github.com/go-redis/redis/cluster_commands.go deleted file mode 100644 index dff62c902d..0000000000 --- a/vendor/github.com/go-redis/redis/cluster_commands.go +++ /dev/null @@ -1,22 +0,0 @@ -package redis - -import "sync/atomic" - -func (c *ClusterClient) DBSize() *IntCmd { - cmd := NewIntCmd("dbsize") - var size int64 - err := c.ForEachMaster(func(master *Client) error { - n, err := master.DBSize().Result() - if err != nil { - return err - } - atomic.AddInt64(&size, n) - return nil - }) - if err != nil { - cmd.setErr(err) - return cmd - } - cmd.val = size - return cmd -} diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go deleted file mode 100644 index 598ed98002..0000000000 --- a/vendor/github.com/go-redis/redis/command.go +++ /dev/null @@ -1,1024 +0,0 @@ -package redis - -import ( - "bytes" - "fmt" - "strconv" - "strings" - "time" - - "github.com/go-redis/redis/internal" - "github.com/go-redis/redis/internal/pool" - "github.com/go-redis/redis/internal/proto" -) - -type Cmder interface { - Name() string - Args() []interface{} - stringArg(int) string - - readReply(*pool.Conn) error - setErr(error) - - readTimeout() *time.Duration - - Err() error - fmt.Stringer -} - -func setCmdsErr(cmds []Cmder, e error) { - for _, cmd := range cmds { - if cmd.Err() == nil { - cmd.setErr(e) - } - } -} - -func firstCmdsErr(cmds []Cmder) error { - for _, cmd := range cmds { - if err := cmd.Err(); err != nil { - return err - } - } - return nil -} - -func writeCmd(cn *pool.Conn, cmds ...Cmder) error { - cn.Wb.Reset() - for _, cmd := range cmds { - if err := cn.Wb.Append(cmd.Args()); err != nil { - return err - } - } - - _, err := cn.Write(cn.Wb.Bytes()) - return err -} - -func cmdString(cmd Cmder, val interface{}) string { - var ss []string - for _, arg := range cmd.Args() { - ss = append(ss, fmt.Sprint(arg)) - } - s := strings.Join(ss, " ") - if err := cmd.Err(); err != nil { - return s + ": " + err.Error() - } - if val != nil { - switch vv := val.(type) { - case []byte: - return s + ": " + string(vv) - default: - return s + ": " + fmt.Sprint(val) - } - } - return s - -} - -func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { - switch cmd.Name() { - case "eval", "evalsha": - if cmd.stringArg(2) != "0" { - return 3 - } else { - return 0 - } - case "publish": - return 1 - } - if info == nil { - return 0 - } - return int(info.FirstKeyPos) -} - -//------------------------------------------------------------------------------ - -type baseCmd struct { - _args []interface{} - err error - - _readTimeout *time.Duration -} - -var _ Cmder = (*Cmd)(nil) - -func (cmd *baseCmd) Err() error { - return cmd.err -} - -func (cmd *baseCmd) Args() []interface{} { - return cmd._args -} - -func (cmd *baseCmd) stringArg(pos int) string { - if pos < 0 || pos >= len(cmd._args) { - return "" - } - s, _ := cmd._args[pos].(string) - return s -} - -func (cmd *baseCmd) Name() string { - if len(cmd._args) > 0 { - // Cmd name must be lower cased. 
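-		// The lower-cased name is written back into _args[0], memoizing it for later calls.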
- s := internal.ToLower(cmd.stringArg(0)) - cmd._args[0] = s - return s - } - return "" -} - -func (cmd *baseCmd) readTimeout() *time.Duration { - return cmd._readTimeout -} - -func (cmd *baseCmd) setReadTimeout(d time.Duration) { - cmd._readTimeout = &d -} - -func (cmd *baseCmd) setErr(e error) { - cmd.err = e -} - -//------------------------------------------------------------------------------ - -type Cmd struct { - baseCmd - - val interface{} -} - -func NewCmd(args ...interface{}) *Cmd { - return &Cmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *Cmd) Val() interface{} { - return cmd.val -} - -func (cmd *Cmd) Result() (interface{}, error) { - return cmd.val, cmd.err -} - -func (cmd *Cmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *Cmd) readReply(cn *pool.Conn) error { - cmd.val, cmd.err = cn.Rd.ReadReply(sliceParser) - if cmd.err != nil { - return cmd.err - } - if b, ok := cmd.val.([]byte); ok { - // Bytes must be copied, because underlying memory is reused. - cmd.val = string(b) - } - return nil -} - -//------------------------------------------------------------------------------ - -type SliceCmd struct { - baseCmd - - val []interface{} -} - -var _ Cmder = (*SliceCmd)(nil) - -func NewSliceCmd(args ...interface{}) *SliceCmd { - return &SliceCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *SliceCmd) Val() []interface{} { - return cmd.val -} - -func (cmd *SliceCmd) Result() ([]interface{}, error) { - return cmd.val, cmd.err -} - -func (cmd *SliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *SliceCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(sliceParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.([]interface{}) - return nil -} - -//------------------------------------------------------------------------------ - -type StatusCmd struct { - baseCmd - - val string -} - -var _ Cmder = (*StatusCmd)(nil) - -func NewStatusCmd(args ...interface{}) *StatusCmd { - return &StatusCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *StatusCmd) Val() string { - return cmd.val -} - -func (cmd *StatusCmd) Result() (string, error) { - return cmd.val, cmd.err -} - -func (cmd *StatusCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StatusCmd) readReply(cn *pool.Conn) error { - cmd.val, cmd.err = cn.Rd.ReadStringReply() - return cmd.err -} - -//------------------------------------------------------------------------------ - -type IntCmd struct { - baseCmd - - val int64 -} - -var _ Cmder = (*IntCmd)(nil) - -func NewIntCmd(args ...interface{}) *IntCmd { - return &IntCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *IntCmd) Val() int64 { - return cmd.val -} - -func (cmd *IntCmd) Result() (int64, error) { - return cmd.val, cmd.err -} - -func (cmd *IntCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *IntCmd) readReply(cn *pool.Conn) error { - cmd.val, cmd.err = cn.Rd.ReadIntReply() - return cmd.err -} - -//------------------------------------------------------------------------------ - -type DurationCmd struct { - baseCmd - - val time.Duration - precision time.Duration -} - -var _ Cmder = (*DurationCmd)(nil) - -func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd { - return &DurationCmd{ - baseCmd: baseCmd{_args: args}, - precision: precision, - } -} - -func (cmd *DurationCmd) Val() time.Duration { - return cmd.val -} - -func (cmd *DurationCmd) Result() (time.Duration, error) { - return cmd.val, 
cmd.err -} - -func (cmd *DurationCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *DurationCmd) readReply(cn *pool.Conn) error { - var n int64 - n, cmd.err = cn.Rd.ReadIntReply() - if cmd.err != nil { - return cmd.err - } - cmd.val = time.Duration(n) * cmd.precision - return nil -} - -//------------------------------------------------------------------------------ - -type TimeCmd struct { - baseCmd - - val time.Time -} - -var _ Cmder = (*TimeCmd)(nil) - -func NewTimeCmd(args ...interface{}) *TimeCmd { - return &TimeCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *TimeCmd) Val() time.Time { - return cmd.val -} - -func (cmd *TimeCmd) Result() (time.Time, error) { - return cmd.val, cmd.err -} - -func (cmd *TimeCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *TimeCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(timeParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.(time.Time) - return nil -} - -//------------------------------------------------------------------------------ - -type BoolCmd struct { - baseCmd - - val bool -} - -var _ Cmder = (*BoolCmd)(nil) - -func NewBoolCmd(args ...interface{}) *BoolCmd { - return &BoolCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *BoolCmd) Val() bool { - return cmd.val -} - -func (cmd *BoolCmd) Result() (bool, error) { - return cmd.val, cmd.err -} - -func (cmd *BoolCmd) String() string { - return cmdString(cmd, cmd.val) -} - -var ok = []byte("OK") - -func (cmd *BoolCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadReply(nil) - // `SET key value NX` returns nil when key already exists. But - // `SETNX key value` returns bool (0/1). So convert nil to bool. - // TODO: is this okay? 
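-	// So a Nil reply maps to false with no error.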
- if cmd.err == Nil { - cmd.val = false - cmd.err = nil - return nil - } - if cmd.err != nil { - return cmd.err - } - switch v := v.(type) { - case int64: - cmd.val = v == 1 - return nil - case []byte: - cmd.val = bytes.Equal(v, ok) - return nil - default: - cmd.err = fmt.Errorf("got %T, wanted int64 or string", v) - return cmd.err - } -} - -//------------------------------------------------------------------------------ - -type StringCmd struct { - baseCmd - - val []byte -} - -var _ Cmder = (*StringCmd)(nil) - -func NewStringCmd(args ...interface{}) *StringCmd { - return &StringCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *StringCmd) Val() string { - return internal.BytesToString(cmd.val) -} - -func (cmd *StringCmd) Result() (string, error) { - return cmd.Val(), cmd.err -} - -func (cmd *StringCmd) Bytes() ([]byte, error) { - return cmd.val, cmd.err -} - -func (cmd *StringCmd) Int64() (int64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseInt(cmd.Val(), 10, 64) -} - -func (cmd *StringCmd) Uint64() (uint64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseUint(cmd.Val(), 10, 64) -} - -func (cmd *StringCmd) Float64() (float64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseFloat(cmd.Val(), 64) -} - -func (cmd *StringCmd) Scan(val interface{}) error { - if cmd.err != nil { - return cmd.err - } - return proto.Scan(cmd.val, val) -} - -func (cmd *StringCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringCmd) readReply(cn *pool.Conn) error { - cmd.val, cmd.err = cn.Rd.ReadBytesReply() - return cmd.err -} - -//------------------------------------------------------------------------------ - -type FloatCmd struct { - baseCmd - - val float64 -} - -var _ Cmder = (*FloatCmd)(nil) - -func NewFloatCmd(args ...interface{}) *FloatCmd { - return &FloatCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *FloatCmd) Val() float64 { - return cmd.val -} - -func (cmd *FloatCmd) Result() (float64, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *FloatCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *FloatCmd) readReply(cn *pool.Conn) error { - cmd.val, cmd.err = cn.Rd.ReadFloatReply() - return cmd.err -} - -//------------------------------------------------------------------------------ - -type StringSliceCmd struct { - baseCmd - - val []string -} - -var _ Cmder = (*StringSliceCmd)(nil) - -func NewStringSliceCmd(args ...interface{}) *StringSliceCmd { - return &StringSliceCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *StringSliceCmd) Val() []string { - return cmd.val -} - -func (cmd *StringSliceCmd) Result() ([]string, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *StringSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { - return proto.ScanSlice(cmd.Val(), container) -} - -func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(stringSliceParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.([]string) - return nil -} - -//------------------------------------------------------------------------------ - -type BoolSliceCmd struct { - baseCmd - - val []bool -} - -var _ Cmder = (*BoolSliceCmd)(nil) - -func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd { - return &BoolSliceCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *BoolSliceCmd) Val() []bool { - return cmd.val -} - 
-func (cmd *BoolSliceCmd) Result() ([]bool, error) { - return cmd.val, cmd.err -} - -func (cmd *BoolSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(boolSliceParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.([]bool) - return nil -} - -//------------------------------------------------------------------------------ - -type StringStringMapCmd struct { - baseCmd - - val map[string]string -} - -var _ Cmder = (*StringStringMapCmd)(nil) - -func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd { - return &StringStringMapCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *StringStringMapCmd) Val() map[string]string { - return cmd.val -} - -func (cmd *StringStringMapCmd) Result() (map[string]string, error) { - return cmd.val, cmd.err -} - -func (cmd *StringStringMapCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(stringStringMapParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.(map[string]string) - return nil -} - -//------------------------------------------------------------------------------ - -type StringIntMapCmd struct { - baseCmd - - val map[string]int64 -} - -var _ Cmder = (*StringIntMapCmd)(nil) - -func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd { - return &StringIntMapCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *StringIntMapCmd) Val() map[string]int64 { - return cmd.val -} - -func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { - return cmd.val, cmd.err -} - -func (cmd *StringIntMapCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(stringIntMapParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.(map[string]int64) - return nil -} - -//------------------------------------------------------------------------------ - -type StringStructMapCmd struct { - baseCmd - - val map[string]struct{} -} - -var _ Cmder = (*StringStructMapCmd)(nil) - -func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd { - return &StringStructMapCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *StringStructMapCmd) Val() map[string]struct{} { - return cmd.val -} - -func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { - return cmd.val, cmd.err -} - -func (cmd *StringStructMapCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringStructMapCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(stringStructMapParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.(map[string]struct{}) - return nil -} - -//------------------------------------------------------------------------------ - -type ZSliceCmd struct { - baseCmd - - val []Z -} - -var _ Cmder = (*ZSliceCmd)(nil) - -func NewZSliceCmd(args ...interface{}) *ZSliceCmd { - return &ZSliceCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *ZSliceCmd) Val() []Z { - return cmd.val -} - -func (cmd *ZSliceCmd) Result() ([]Z, error) { - return cmd.val, cmd.err -} - -func (cmd *ZSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ZSliceCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(zSliceParser) - if 
cmd.err != nil { - return cmd.err - } - cmd.val = v.([]Z) - return nil -} - -//------------------------------------------------------------------------------ - -type ScanCmd struct { - baseCmd - - page []string - cursor uint64 - - process func(cmd Cmder) error -} - -var _ Cmder = (*ScanCmd)(nil) - -func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd { - return &ScanCmd{ - baseCmd: baseCmd{_args: args}, - process: process, - } -} - -func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { - return cmd.page, cmd.cursor -} - -func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { - return cmd.page, cmd.cursor, cmd.err -} - -func (cmd *ScanCmd) String() string { - return cmdString(cmd, cmd.page) -} - -func (cmd *ScanCmd) readReply(cn *pool.Conn) error { - cmd.page, cmd.cursor, cmd.err = cn.Rd.ReadScanReply() - return cmd.err -} - -// Iterator creates a new ScanIterator. -func (cmd *ScanCmd) Iterator() *ScanIterator { - return &ScanIterator{ - cmd: cmd, - } -} - -//------------------------------------------------------------------------------ - -type ClusterNode struct { - Id string - Addr string -} - -type ClusterSlot struct { - Start int - End int - Nodes []ClusterNode -} - -type ClusterSlotsCmd struct { - baseCmd - - val []ClusterSlot -} - -var _ Cmder = (*ClusterSlotsCmd)(nil) - -func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd { - return &ClusterSlotsCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { - return cmd.val -} - -func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *ClusterSlotsCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(clusterSlotsParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.([]ClusterSlot) - return nil -} - -//------------------------------------------------------------------------------ - -// GeoLocation is used with GeoAdd to add geospatial location. -type GeoLocation struct { - Name string - Longitude, Latitude, Dist float64 - GeoHash int64 -} - -// GeoRadiusQuery is used with GeoRadius to query geospatial index. -type GeoRadiusQuery struct { - Radius float64 - // Can be m, km, ft, or mi. Default is km. - Unit string - WithCoord bool - WithDist bool - WithGeoHash bool - Count int - // Can be ASC or DESC. Default is no sort order. 
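-	// Ordering is by distance from the queried center point.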
- Sort string - Store string - StoreDist string -} - -type GeoLocationCmd struct { - baseCmd - - q *GeoRadiusQuery - locations []GeoLocation -} - -var _ Cmder = (*GeoLocationCmd)(nil) - -func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { - args = append(args, q.Radius) - if q.Unit != "" { - args = append(args, q.Unit) - } else { - args = append(args, "km") - } - if q.WithCoord { - args = append(args, "withcoord") - } - if q.WithDist { - args = append(args, "withdist") - } - if q.WithGeoHash { - args = append(args, "withhash") - } - if q.Count > 0 { - args = append(args, "count", q.Count) - } - if q.Sort != "" { - args = append(args, q.Sort) - } - if q.Store != "" { - args = append(args, "store") - args = append(args, q.Store) - } - if q.StoreDist != "" { - args = append(args, "storedist") - args = append(args, q.StoreDist) - } - return &GeoLocationCmd{ - baseCmd: baseCmd{_args: args}, - q: q, - } -} - -func (cmd *GeoLocationCmd) Val() []GeoLocation { - return cmd.locations -} - -func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { - return cmd.locations, cmd.err -} - -func (cmd *GeoLocationCmd) String() string { - return cmdString(cmd, cmd.locations) -} - -func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) - if cmd.err != nil { - return cmd.err - } - cmd.locations = v.([]GeoLocation) - return nil -} - -//------------------------------------------------------------------------------ - -type GeoPos struct { - Longitude, Latitude float64 -} - -type GeoPosCmd struct { - baseCmd - - positions []*GeoPos -} - -var _ Cmder = (*GeoPosCmd)(nil) - -func NewGeoPosCmd(args ...interface{}) *GeoPosCmd { - return &GeoPosCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *GeoPosCmd) Val() []*GeoPos { - return cmd.positions -} - -func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *GeoPosCmd) String() string { - return cmdString(cmd, cmd.positions) -} - -func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(geoPosSliceParser) - if cmd.err != nil { - return cmd.err - } - cmd.positions = v.([]*GeoPos) - return nil -} - -//------------------------------------------------------------------------------ - -type CommandInfo struct { - Name string - Arity int8 - Flags []string - FirstKeyPos int8 - LastKeyPos int8 - StepCount int8 - ReadOnly bool -} - -type CommandsInfoCmd struct { - baseCmd - - val map[string]*CommandInfo -} - -var _ Cmder = (*CommandsInfoCmd)(nil) - -func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd { - return &CommandsInfoCmd{ - baseCmd: baseCmd{_args: args}, - } -} - -func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { - return cmd.val -} - -func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *CommandsInfoCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error { - var v interface{} - v, cmd.err = cn.Rd.ReadArrayReply(commandInfoSliceParser) - if cmd.err != nil { - return cmd.err - } - cmd.val = v.(map[string]*CommandInfo) - return nil -} diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go deleted file mode 100644 index 569342cfa1..0000000000 --- a/vendor/github.com/go-redis/redis/commands.go +++ /dev/null @@ -1,2147 +0,0 @@ -package redis - -import ( - 
"io" - "time" - - "github.com/go-redis/redis/internal" -) - -func readTimeout(timeout time.Duration) time.Duration { - if timeout == 0 { - return 0 - } - return timeout + 10*time.Second -} - -func usePrecise(dur time.Duration) bool { - return dur < time.Second || dur%time.Second != 0 -} - -func formatMs(dur time.Duration) int64 { - if dur > 0 && dur < time.Millisecond { - internal.Logf( - "specified duration is %s, but minimal supported value is %s", - dur, time.Millisecond, - ) - } - return int64(dur / time.Millisecond) -} - -func formatSec(dur time.Duration) int64 { - if dur > 0 && dur < time.Second { - internal.Logf( - "specified duration is %s, but minimal supported value is %s", - dur, time.Second, - ) - } - return int64(dur / time.Second) -} - -type Cmdable interface { - Pipeline() Pipeliner - Pipelined(fn func(Pipeliner) error) ([]Cmder, error) - - TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) - TxPipeline() Pipeliner - - ClientGetName() *StringCmd - Echo(message interface{}) *StringCmd - Ping() *StatusCmd - Quit() *StatusCmd - Del(keys ...string) *IntCmd - Unlink(keys ...string) *IntCmd - Dump(key string) *StringCmd - Exists(keys ...string) *IntCmd - Expire(key string, expiration time.Duration) *BoolCmd - ExpireAt(key string, tm time.Time) *BoolCmd - Keys(pattern string) *StringSliceCmd - Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd - Move(key string, db int64) *BoolCmd - ObjectRefCount(key string) *IntCmd - ObjectEncoding(key string) *StringCmd - ObjectIdleTime(key string) *DurationCmd - Persist(key string) *BoolCmd - PExpire(key string, expiration time.Duration) *BoolCmd - PExpireAt(key string, tm time.Time) *BoolCmd - PTTL(key string) *DurationCmd - RandomKey() *StringCmd - Rename(key, newkey string) *StatusCmd - RenameNX(key, newkey string) *BoolCmd - Restore(key string, ttl time.Duration, value string) *StatusCmd - RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd - Sort(key string, sort Sort) *StringSliceCmd - SortInterfaces(key string, sort Sort) *SliceCmd - TTL(key string) *DurationCmd - Type(key string) *StatusCmd - Scan(cursor uint64, match string, count int64) *ScanCmd - SScan(key string, cursor uint64, match string, count int64) *ScanCmd - HScan(key string, cursor uint64, match string, count int64) *ScanCmd - ZScan(key string, cursor uint64, match string, count int64) *ScanCmd - Append(key, value string) *IntCmd - BitCount(key string, bitCount *BitCount) *IntCmd - BitOpAnd(destKey string, keys ...string) *IntCmd - BitOpOr(destKey string, keys ...string) *IntCmd - BitOpXor(destKey string, keys ...string) *IntCmd - BitOpNot(destKey string, key string) *IntCmd - BitPos(key string, bit int64, pos ...int64) *IntCmd - Decr(key string) *IntCmd - DecrBy(key string, decrement int64) *IntCmd - Get(key string) *StringCmd - GetBit(key string, offset int64) *IntCmd - GetRange(key string, start, end int64) *StringCmd - GetSet(key string, value interface{}) *StringCmd - Incr(key string) *IntCmd - IncrBy(key string, value int64) *IntCmd - IncrByFloat(key string, value float64) *FloatCmd - MGet(keys ...string) *SliceCmd - MSet(pairs ...interface{}) *StatusCmd - MSetNX(pairs ...interface{}) *BoolCmd - Set(key string, value interface{}, expiration time.Duration) *StatusCmd - SetBit(key string, offset int64, value int) *IntCmd - SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd - SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd - SetRange(key string, offset int64, value string) *IntCmd 
- StrLen(key string) *IntCmd - HDel(key string, fields ...string) *IntCmd - HExists(key, field string) *BoolCmd - HGet(key, field string) *StringCmd - HGetAll(key string) *StringStringMapCmd - HIncrBy(key, field string, incr int64) *IntCmd - HIncrByFloat(key, field string, incr float64) *FloatCmd - HKeys(key string) *StringSliceCmd - HLen(key string) *IntCmd - HMGet(key string, fields ...string) *SliceCmd - HMSet(key string, fields map[string]interface{}) *StatusCmd - HSet(key, field string, value interface{}) *BoolCmd - HSetNX(key, field string, value interface{}) *BoolCmd - HVals(key string) *StringSliceCmd - BLPop(timeout time.Duration, keys ...string) *StringSliceCmd - BRPop(timeout time.Duration, keys ...string) *StringSliceCmd - BRPopLPush(source, destination string, timeout time.Duration) *StringCmd - LIndex(key string, index int64) *StringCmd - LInsert(key, op string, pivot, value interface{}) *IntCmd - LInsertBefore(key string, pivot, value interface{}) *IntCmd - LInsertAfter(key string, pivot, value interface{}) *IntCmd - LLen(key string) *IntCmd - LPop(key string) *StringCmd - LPush(key string, values ...interface{}) *IntCmd - LPushX(key string, value interface{}) *IntCmd - LRange(key string, start, stop int64) *StringSliceCmd - LRem(key string, count int64, value interface{}) *IntCmd - LSet(key string, index int64, value interface{}) *StatusCmd - LTrim(key string, start, stop int64) *StatusCmd - RPop(key string) *StringCmd - RPopLPush(source, destination string) *StringCmd - RPush(key string, values ...interface{}) *IntCmd - RPushX(key string, value interface{}) *IntCmd - SAdd(key string, members ...interface{}) *IntCmd - SCard(key string) *IntCmd - SDiff(keys ...string) *StringSliceCmd - SDiffStore(destination string, keys ...string) *IntCmd - SInter(keys ...string) *StringSliceCmd - SInterStore(destination string, keys ...string) *IntCmd - SIsMember(key string, member interface{}) *BoolCmd - SMembers(key string) *StringSliceCmd - SMembersMap(key string) *StringStructMapCmd - SMove(source, destination string, member interface{}) *BoolCmd - SPop(key string) *StringCmd - SPopN(key string, count int64) *StringSliceCmd - SRandMember(key string) *StringCmd - SRandMemberN(key string, count int64) *StringSliceCmd - SRem(key string, members ...interface{}) *IntCmd - SUnion(keys ...string) *StringSliceCmd - SUnionStore(destination string, keys ...string) *IntCmd - ZAdd(key string, members ...Z) *IntCmd - ZAddNX(key string, members ...Z) *IntCmd - ZAddXX(key string, members ...Z) *IntCmd - ZAddCh(key string, members ...Z) *IntCmd - ZAddNXCh(key string, members ...Z) *IntCmd - ZAddXXCh(key string, members ...Z) *IntCmd - ZIncr(key string, member Z) *FloatCmd - ZIncrNX(key string, member Z) *FloatCmd - ZIncrXX(key string, member Z) *FloatCmd - ZCard(key string) *IntCmd - ZCount(key, min, max string) *IntCmd - ZLexCount(key, min, max string) *IntCmd - ZIncrBy(key string, increment float64, member string) *FloatCmd - ZInterStore(destination string, store ZStore, keys ...string) *IntCmd - ZRange(key string, start, stop int64) *StringSliceCmd - ZRangeWithScores(key string, start, stop int64) *ZSliceCmd - ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd - ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd - ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd - ZRank(key, member string) *IntCmd - ZRem(key string, members ...interface{}) *IntCmd - ZRemRangeByRank(key string, start, stop int64) *IntCmd - ZRemRangeByScore(key, min, max string) *IntCmd - ZRemRangeByLex(key, min, 
max string) *IntCmd - ZRevRange(key string, start, stop int64) *StringSliceCmd - ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd - ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd - ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd - ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd - ZRevRank(key, member string) *IntCmd - ZScore(key, member string) *FloatCmd - ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd - PFAdd(key string, els ...interface{}) *IntCmd - PFCount(keys ...string) *IntCmd - PFMerge(dest string, keys ...string) *StatusCmd - BgRewriteAOF() *StatusCmd - BgSave() *StatusCmd - ClientKill(ipPort string) *StatusCmd - ClientList() *StringCmd - ClientPause(dur time.Duration) *BoolCmd - ConfigGet(parameter string) *SliceCmd - ConfigResetStat() *StatusCmd - ConfigSet(parameter, value string) *StatusCmd - DBSize() *IntCmd - FlushAll() *StatusCmd - FlushAllAsync() *StatusCmd - FlushDB() *StatusCmd - FlushDBAsync() *StatusCmd - Info(section ...string) *StringCmd - LastSave() *IntCmd - Save() *StatusCmd - Shutdown() *StatusCmd - ShutdownSave() *StatusCmd - ShutdownNoSave() *StatusCmd - SlaveOf(host, port string) *StatusCmd - Time() *TimeCmd - Eval(script string, keys []string, args ...interface{}) *Cmd - EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd - ScriptExists(scripts ...string) *BoolSliceCmd - ScriptFlush() *StatusCmd - ScriptKill() *StatusCmd - ScriptLoad(script string) *StringCmd - DebugObject(key string) *StringCmd - Publish(channel string, message interface{}) *IntCmd - PubSubChannels(pattern string) *StringSliceCmd - PubSubNumSub(channels ...string) *StringIntMapCmd - PubSubNumPat() *IntCmd - ClusterSlots() *ClusterSlotsCmd - ClusterNodes() *StringCmd - ClusterMeet(host, port string) *StatusCmd - ClusterForget(nodeID string) *StatusCmd - ClusterReplicate(nodeID string) *StatusCmd - ClusterResetSoft() *StatusCmd - ClusterResetHard() *StatusCmd - ClusterInfo() *StringCmd - ClusterKeySlot(key string) *IntCmd - ClusterCountFailureReports(nodeID string) *IntCmd - ClusterCountKeysInSlot(slot int) *IntCmd - ClusterDelSlots(slots ...int) *StatusCmd - ClusterDelSlotsRange(min, max int) *StatusCmd - ClusterSaveConfig() *StatusCmd - ClusterSlaves(nodeID string) *StringSliceCmd - ClusterFailover() *StatusCmd - ClusterAddSlots(slots ...int) *StatusCmd - ClusterAddSlotsRange(min, max int) *StatusCmd - GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd - GeoPos(key string, members ...string) *GeoPosCmd - GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd - GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd - GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd - GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd - GeoDist(key string, member1, member2, unit string) *FloatCmd - GeoHash(key string, members ...string) *StringSliceCmd - Command() *CommandsInfoCmd -} - -type StatefulCmdable interface { - Cmdable - Auth(password string) *StatusCmd - Select(index int) *StatusCmd - ClientSetName(name string) *BoolCmd - ReadOnly() *StatusCmd - ReadWrite() *StatusCmd -} - -var _ Cmdable = (*Client)(nil) -var _ Cmdable = (*Tx)(nil) -var _ Cmdable = (*Ring)(nil) -var _ Cmdable = (*ClusterClient)(nil) - -type cmdable struct { - process func(cmd Cmder) error -} - -func (c *cmdable) setProcessor(fn func(Cmder) error) { - c.process = fn -} - -type statefulCmdable struct { - cmdable - process func(cmd 
Cmder) error -} - -func (c *statefulCmdable) setProcessor(fn func(Cmder) error) { - c.process = fn - c.cmdable.setProcessor(fn) -} - -//------------------------------------------------------------------------------ - -func (c *statefulCmdable) Auth(password string) *StatusCmd { - cmd := NewStatusCmd("auth", password) - c.process(cmd) - return cmd -} - -func (c *cmdable) Echo(message interface{}) *StringCmd { - cmd := NewStringCmd("echo", message) - c.process(cmd) - return cmd -} - -func (c *cmdable) Ping() *StatusCmd { - cmd := NewStatusCmd("ping") - c.process(cmd) - return cmd -} - -func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd { - cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond)) - c.process(cmd) - return cmd -} - -func (c *cmdable) Quit() *StatusCmd { - panic("not implemented") -} - -func (c *statefulCmdable) Select(index int) *StatusCmd { - cmd := NewStatusCmd("select", index) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) Del(keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "del" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) Unlink(keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "unlink" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) Dump(key string) *StringCmd { - cmd := NewStringCmd("dump", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) Exists(keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "exists" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(args...) 
- c.process(cmd) - return cmd -} - -func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd { - cmd := NewBoolCmd("expire", key, formatSec(expiration)) - c.process(cmd) - return cmd -} - -func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd { - cmd := NewBoolCmd("expireat", key, tm.Unix()) - c.process(cmd) - return cmd -} - -func (c *cmdable) Keys(pattern string) *StringSliceCmd { - cmd := NewStringSliceCmd("keys", pattern) - c.process(cmd) - return cmd -} - -func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd { - cmd := NewStatusCmd( - "migrate", - host, - port, - key, - db, - formatMs(timeout), - ) - cmd.setReadTimeout(readTimeout(timeout)) - c.process(cmd) - return cmd -} - -func (c *cmdable) Move(key string, db int64) *BoolCmd { - cmd := NewBoolCmd("move", key, db) - c.process(cmd) - return cmd -} - -func (c *cmdable) ObjectRefCount(key string) *IntCmd { - cmd := NewIntCmd("object", "refcount", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) ObjectEncoding(key string) *StringCmd { - cmd := NewStringCmd("object", "encoding", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) ObjectIdleTime(key string) *DurationCmd { - cmd := NewDurationCmd(time.Second, "object", "idletime", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) Persist(key string) *BoolCmd { - cmd := NewBoolCmd("persist", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd { - cmd := NewBoolCmd("pexpire", key, formatMs(expiration)) - c.process(cmd) - return cmd -} - -func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd { - cmd := NewBoolCmd( - "pexpireat", - key, - tm.UnixNano()/int64(time.Millisecond), - ) - c.process(cmd) - return cmd -} - -func (c *cmdable) PTTL(key string) *DurationCmd { - cmd := NewDurationCmd(time.Millisecond, "pttl", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) RandomKey() *StringCmd { - cmd := NewStringCmd("randomkey") - c.process(cmd) - return cmd -} - -func (c *cmdable) Rename(key, newkey string) *StatusCmd { - cmd := NewStatusCmd("rename", key, newkey) - c.process(cmd) - return cmd -} - -func (c *cmdable) RenameNX(key, newkey string) *BoolCmd { - cmd := NewBoolCmd("renamenx", key, newkey) - c.process(cmd) - return cmd -} - -func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd { - cmd := NewStatusCmd( - "restore", - key, - formatMs(ttl), - value, - ) - c.process(cmd) - return cmd -} - -func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd { - cmd := NewStatusCmd( - "restore", - key, - formatMs(ttl), - value, - "replace", - ) - c.process(cmd) - return cmd -} - -type Sort struct { - By string - Offset, Count float64 - Get []string - Order string - IsAlpha bool - Store string -} - -func (sort *Sort) args(key string) []interface{} { - args := []interface{}{"sort", key} - if sort.By != "" { - args = append(args, "by", sort.By) - } - if sort.Offset != 0 || sort.Count != 0 { - args = append(args, "limit", sort.Offset, sort.Count) - } - for _, get := range sort.Get { - args = append(args, "get", get) - } - if sort.Order != "" { - args = append(args, sort.Order) - } - if sort.IsAlpha { - args = append(args, "alpha") - } - if sort.Store != "" { - args = append(args, "store", sort.Store) - } - return args -} - -func (c *cmdable) Sort(key string, sort Sort) *StringSliceCmd { - cmd := NewStringSliceCmd(sort.args(key)...) 
- c.process(cmd) - return cmd -} - -func (c *cmdable) SortInterfaces(key string, sort Sort) *SliceCmd { - cmd := NewSliceCmd(sort.args(key)...) - c.process(cmd) - return cmd -} - -func (c *cmdable) TTL(key string) *DurationCmd { - cmd := NewDurationCmd(time.Second, "ttl", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) Type(key string) *StatusCmd { - cmd := NewStatusCmd("type", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"scan", cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(c.process, args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"sscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(c.process, args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"hscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(c.process, args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"zscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(c.process, args...) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) Append(key, value string) *IntCmd { - cmd := NewIntCmd("append", key, value) - c.process(cmd) - return cmd -} - -type BitCount struct { - Start, End int64 -} - -func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd { - args := []interface{}{"bitcount", key} - if bitCount != nil { - args = append( - args, - bitCount.Start, - bitCount.End, - ) - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd { - args := make([]interface{}, 3+len(keys)) - args[0] = "bitop" - args[1] = op - args[2] = destKey - for i, key := range keys { - args[3+i] = key - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd { - return c.bitOp("and", destKey, keys...) -} - -func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd { - return c.bitOp("or", destKey, keys...) -} - -func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd { - return c.bitOp("xor", destKey, keys...) -} - -func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd { - return c.bitOp("not", destKey, key) -} - -func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd { - args := make([]interface{}, 3+len(pos)) - args[0] = "bitpos" - args[1] = key - args[2] = bit - switch len(pos) { - case 0: - case 1: - args[3] = pos[0] - case 2: - args[3] = pos[0] - args[4] = pos[1] - default: - panic("too many arguments") - } - cmd := NewIntCmd(args...) 
- c.process(cmd) - return cmd -} - -func (c *cmdable) Decr(key string) *IntCmd { - cmd := NewIntCmd("decr", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd { - cmd := NewIntCmd("decrby", key, decrement) - c.process(cmd) - return cmd -} - -// Redis `GET key` command. It returns redis.Nil error when key does not exist. -func (c *cmdable) Get(key string) *StringCmd { - cmd := NewStringCmd("get", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) GetBit(key string, offset int64) *IntCmd { - cmd := NewIntCmd("getbit", key, offset) - c.process(cmd) - return cmd -} - -func (c *cmdable) GetRange(key string, start, end int64) *StringCmd { - cmd := NewStringCmd("getrange", key, start, end) - c.process(cmd) - return cmd -} - -func (c *cmdable) GetSet(key string, value interface{}) *StringCmd { - cmd := NewStringCmd("getset", key, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) Incr(key string) *IntCmd { - cmd := NewIntCmd("incr", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) IncrBy(key string, value int64) *IntCmd { - cmd := NewIntCmd("incrby", key, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd { - cmd := NewFloatCmd("incrbyfloat", key, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) MGet(keys ...string) *SliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "mget" - for i, key := range keys { - args[1+i] = key - } - cmd := NewSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd { - args := make([]interface{}, 1+len(pairs)) - args[0] = "mset" - for i, pair := range pairs { - args[1+i] = pair - } - cmd := NewStatusCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd { - args := make([]interface{}, 1+len(pairs)) - args[0] = "msetnx" - for i, pair := range pairs { - args[1+i] = pair - } - cmd := NewBoolCmd(args...) - c.process(cmd) - return cmd -} - -// Redis `SET key value [expiration]` command. -// -// Use expiration for `SETEX`-like behavior. -// Zero expiration means the key has no expiration time. -func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd { - args := make([]interface{}, 3, 4) - args[0] = "set" - args[1] = key - args[2] = value - if expiration > 0 { - if usePrecise(expiration) { - args = append(args, "px", formatMs(expiration)) - } else { - args = append(args, "ex", formatSec(expiration)) - } - } - cmd := NewStatusCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd { - cmd := NewIntCmd( - "setbit", - key, - offset, - value, - ) - c.process(cmd) - return cmd -} - -// Redis `SET key value [expiration] NX` command. -// -// Zero expiration means the key has no expiration time. -func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd { - var cmd *BoolCmd - if expiration == 0 { - // Use old `SETNX` to support old Redis versions. - cmd = NewBoolCmd("setnx", key, value) - } else { - if usePrecise(expiration) { - cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx") - } else { - cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx") - } - } - c.process(cmd) - return cmd -} - -// Redis `SET key value [expiration] XX` command. -// -// Zero expiration means the key has no expiration time. 
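-// As with Set, sub-second (or non-whole-second) expirations are sent as PX in milliseconds, otherwise as EX in seconds.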
-func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd { - var cmd *BoolCmd - if expiration == 0 { - cmd = NewBoolCmd("set", key, value, "xx") - } else { - if usePrecise(expiration) { - cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx") - } else { - cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx") - } - } - c.process(cmd) - return cmd -} - -func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd { - cmd := NewIntCmd("setrange", key, offset, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) StrLen(key string) *IntCmd { - cmd := NewIntCmd("strlen", key) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) HDel(key string, fields ...string) *IntCmd { - args := make([]interface{}, 2+len(fields)) - args[0] = "hdel" - args[1] = key - for i, field := range fields { - args[2+i] = field - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) HExists(key, field string) *BoolCmd { - cmd := NewBoolCmd("hexists", key, field) - c.process(cmd) - return cmd -} - -func (c *cmdable) HGet(key, field string) *StringCmd { - cmd := NewStringCmd("hget", key, field) - c.process(cmd) - return cmd -} - -func (c *cmdable) HGetAll(key string) *StringStringMapCmd { - cmd := NewStringStringMapCmd("hgetall", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd { - cmd := NewIntCmd("hincrby", key, field, incr) - c.process(cmd) - return cmd -} - -func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd { - cmd := NewFloatCmd("hincrbyfloat", key, field, incr) - c.process(cmd) - return cmd -} - -func (c *cmdable) HKeys(key string) *StringSliceCmd { - cmd := NewStringSliceCmd("hkeys", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) HLen(key string) *IntCmd { - cmd := NewIntCmd("hlen", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd { - args := make([]interface{}, 2+len(fields)) - args[0] = "hmget" - args[1] = key - for i, field := range fields { - args[2+i] = field - } - cmd := NewSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd { - args := make([]interface{}, 2+len(fields)*2) - args[0] = "hmset" - args[1] = key - i := 2 - for k, v := range fields { - args[i] = k - args[i+1] = v - i += 2 - } - cmd := NewStatusCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd { - cmd := NewBoolCmd("hset", key, field, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd { - cmd := NewBoolCmd("hsetnx", key, field, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) HVals(key string) *StringSliceCmd { - cmd := NewStringSliceCmd("hvals", key) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "blpop" - for i, key := range keys { - args[1+i] = key - } - args[len(args)-1] = formatSec(timeout) - cmd := NewStringSliceCmd(args...) 
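-	// The read deadline is padded past the blocking timeout (see readTimeout) so the server can answer first.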
- cmd.setReadTimeout(readTimeout(timeout)) - c.process(cmd) - return cmd -} - -func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "brpop" - for i, key := range keys { - args[1+i] = key - } - args[len(keys)+1] = formatSec(timeout) - cmd := NewStringSliceCmd(args...) - cmd.setReadTimeout(readTimeout(timeout)) - c.process(cmd) - return cmd -} - -func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd { - cmd := NewStringCmd( - "brpoplpush", - source, - destination, - formatSec(timeout), - ) - cmd.setReadTimeout(readTimeout(timeout)) - c.process(cmd) - return cmd -} - -func (c *cmdable) LIndex(key string, index int64) *StringCmd { - cmd := NewStringCmd("lindex", key, index) - c.process(cmd) - return cmd -} - -func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd("linsert", key, op, pivot, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd("linsert", key, "before", pivot, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd("linsert", key, "after", pivot, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) LLen(key string) *IntCmd { - cmd := NewIntCmd("llen", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) LPop(key string) *StringCmd { - cmd := NewStringCmd("lpop", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2+len(values)) - args[0] = "lpush" - args[1] = key - for i, value := range values { - args[2+i] = value - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) LPushX(key string, value interface{}) *IntCmd { - cmd := NewIntCmd("lpushx", key, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd { - cmd := NewStringSliceCmd( - "lrange", - key, - start, - stop, - ) - c.process(cmd) - return cmd -} - -func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd { - cmd := NewIntCmd("lrem", key, count, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd { - cmd := NewStatusCmd("lset", key, index, value) - c.process(cmd) - return cmd -} - -func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd { - cmd := NewStatusCmd( - "ltrim", - key, - start, - stop, - ) - c.process(cmd) - return cmd -} - -func (c *cmdable) RPop(key string) *StringCmd { - cmd := NewStringCmd("rpop", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) RPopLPush(source, destination string) *StringCmd { - cmd := NewStringCmd("rpoplpush", source, destination) - c.process(cmd) - return cmd -} - -func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2+len(values)) - args[0] = "rpush" - args[1] = key - for i, value := range values { - args[2+i] = value - } - cmd := NewIntCmd(args...) 
- c.process(cmd) - return cmd -} - -func (c *cmdable) RPushX(key string, value interface{}) *IntCmd { - cmd := NewIntCmd("rpushx", key, value) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2+len(members)) - args[0] = "sadd" - args[1] = key - for i, member := range members { - args[2+i] = member - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SCard(key string) *IntCmd { - cmd := NewIntCmd("scard", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) SDiff(keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sdiff" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sdiffstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SInter(keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sinter" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sinterstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd { - cmd := NewBoolCmd("sismember", key, member) - c.process(cmd) - return cmd -} - -// Redis `SMEMBERS key` command output as a slice -func (c *cmdable) SMembers(key string) *StringSliceCmd { - cmd := NewStringSliceCmd("smembers", key) - c.process(cmd) - return cmd -} - -// Redis `SMEMBERS key` command output as a map -func (c *cmdable) SMembersMap(key string) *StringStructMapCmd { - cmd := NewStringStructMapCmd("smembers", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd { - cmd := NewBoolCmd("smove", source, destination, member) - c.process(cmd) - return cmd -} - -// Redis `SPOP key` command. -func (c *cmdable) SPop(key string) *StringCmd { - cmd := NewStringCmd("spop", key) - c.process(cmd) - return cmd -} - -// Redis `SPOP key count` command. -func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd { - cmd := NewStringSliceCmd("spop", key, count) - c.process(cmd) - return cmd -} - -// Redis `SRANDMEMBER key` command. -func (c *cmdable) SRandMember(key string) *StringCmd { - cmd := NewStringCmd("srandmember", key) - c.process(cmd) - return cmd -} - -// Redis `SRANDMEMBER key count` command. -func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd { - cmd := NewStringSliceCmd("srandmember", key, count) - c.process(cmd) - return cmd -} - -func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2+len(members)) - args[0] = "srem" - args[1] = key - for i, member := range members { - args[2+i] = member - } - cmd := NewIntCmd(args...) 
- c.process(cmd) - return cmd -} - -func (c *cmdable) SUnion(keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sunion" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sunionstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -// Z represents sorted set member. -type Z struct { - Score float64 - Member interface{} -} - -// ZStore is used as an arg to ZInterStore and ZUnionStore. -type ZStore struct { - Weights []float64 - // Can be SUM, MIN or MAX. - Aggregate string -} - -func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd { - for i, m := range members { - a[n+2*i] = m.Score - a[n+2*i+1] = m.Member - } - cmd := NewIntCmd(a...) - c.process(cmd) - return cmd -} - -// Redis `ZADD key score member [score member ...]` command. -func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd { - const n = 2 - a := make([]interface{}, n+2*len(members)) - a[0], a[1] = "zadd", key - return c.zAdd(a, n, members...) -} - -// Redis `ZADD key NX score member [score member ...]` command. -func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd { - const n = 3 - a := make([]interface{}, n+2*len(members)) - a[0], a[1], a[2] = "zadd", key, "nx" - return c.zAdd(a, n, members...) -} - -// Redis `ZADD key XX score member [score member ...]` command. -func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd { - const n = 3 - a := make([]interface{}, n+2*len(members)) - a[0], a[1], a[2] = "zadd", key, "xx" - return c.zAdd(a, n, members...) -} - -// Redis `ZADD key CH score member [score member ...]` command. -func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd { - const n = 3 - a := make([]interface{}, n+2*len(members)) - a[0], a[1], a[2] = "zadd", key, "ch" - return c.zAdd(a, n, members...) -} - -// Redis `ZADD key NX CH score member [score member ...]` command. -func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd { - const n = 4 - a := make([]interface{}, n+2*len(members)) - a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch" - return c.zAdd(a, n, members...) -} - -// Redis `ZADD key XX CH score member [score member ...]` command. -func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd { - const n = 4 - a := make([]interface{}, n+2*len(members)) - a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch" - return c.zAdd(a, n, members...) -} - -func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd { - for i, m := range members { - a[n+2*i] = m.Score - a[n+2*i+1] = m.Member - } - cmd := NewFloatCmd(a...) - c.process(cmd) - return cmd -} - -// Redis `ZADD key INCR score member` command. -func (c *cmdable) ZIncr(key string, member Z) *FloatCmd { - const n = 3 - a := make([]interface{}, n+2) - a[0], a[1], a[2] = "zadd", key, "incr" - return c.zIncr(a, n, member) -} - -// Redis `ZADD key NX INCR score member` command. -func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd { - const n = 4 - a := make([]interface{}, n+2) - a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx" - return c.zIncr(a, n, member) -} - -// Redis `ZADD key XX INCR score member` command. 
-func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd { - const n = 4 - a := make([]interface{}, n+2) - a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx" - return c.zIncr(a, n, member) -} - -func (c *cmdable) ZCard(key string) *IntCmd { - cmd := NewIntCmd("zcard", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZCount(key, min, max string) *IntCmd { - cmd := NewIntCmd("zcount", key, min, max) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZLexCount(key, min, max string) *IntCmd { - cmd := NewIntCmd("zlexcount", key, min, max) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd { - cmd := NewFloatCmd("zincrby", key, increment, member) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd { - args := make([]interface{}, 3+len(keys)) - args[0] = "zinterstore" - args[1] = destination - args[2] = len(keys) - for i, key := range keys { - args[3+i] = key - } - if len(store.Weights) > 0 { - args = append(args, "weights") - for _, weight := range store.Weights { - args = append(args, weight) - } - } - if store.Aggregate != "" { - args = append(args, "aggregate", store.Aggregate) - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { - args := []interface{}{ - "zrange", - key, - start, - stop, - } - if withScores { - args = append(args, "withscores") - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd { - return c.zRange(key, start, stop, false) -} - -func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { - cmd := NewZSliceCmd("zrange", key, start, stop, "withscores") - c.process(cmd) - return cmd -} - -type ZRangeBy struct { - Min, Max string - Offset, Count int64 -} - -func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd { - args := []interface{}{zcmd, key, opt.Min, opt.Max} - if withScores { - args = append(args, "withscores") - } - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { - return c.zRangeBy("zrangebyscore", key, opt, false) -} - -func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { - return c.zRangeBy("zrangebylex", key, opt, false) -} - -func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { - args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewZSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRank(key, member string) *IntCmd { - cmd := NewIntCmd("zrank", key, member) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2+len(members)) - args[0] = "zrem" - args[1] = key - for i, member := range members { - args[2+i] = member - } - cmd := NewIntCmd(args...) 
- c.process(cmd) - return cmd -} - -func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd { - cmd := NewIntCmd( - "zremrangebyrank", - key, - start, - stop, - ) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd { - cmd := NewIntCmd("zremrangebyscore", key, min, max) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd { - cmd := NewIntCmd("zremrangebylex", key, min, max) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd { - cmd := NewStringSliceCmd("zrevrange", key, start, stop) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd { - cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores") - c.process(cmd) - return cmd -} - -func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd { - args := []interface{}{zcmd, key, opt.Max, opt.Min} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { - return c.zRevRangeBy("zrevrangebyscore", key, opt) -} - -func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { - return c.zRevRangeBy("zrevrangebylex", key, opt) -} - -func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { - args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewZSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZRevRank(key, member string) *IntCmd { - cmd := NewIntCmd("zrevrank", key, member) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZScore(key, member string) *FloatCmd { - cmd := NewFloatCmd("zscore", key, member) - c.process(cmd) - return cmd -} - -func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd { - args := make([]interface{}, 3+len(keys)) - args[0] = "zunionstore" - args[1] = dest - args[2] = len(keys) - for i, key := range keys { - args[3+i] = key - } - if len(store.Weights) > 0 { - args = append(args, "weights") - for _, weight := range store.Weights { - args = append(args, weight) - } - } - if store.Aggregate != "" { - args = append(args, "aggregate", store.Aggregate) - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd { - args := make([]interface{}, 2+len(els)) - args[0] = "pfadd" - args[1] = key - for i, el := range els { - args[2+i] = el - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) PFCount(keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "pfcount" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "pfmerge" - args[1] = dest - for i, key := range keys { - args[2+i] = key - } - cmd := NewStatusCmd(args...) 
- c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) BgRewriteAOF() *StatusCmd { - cmd := NewStatusCmd("bgrewriteaof") - c.process(cmd) - return cmd -} - -func (c *cmdable) BgSave() *StatusCmd { - cmd := NewStatusCmd("bgsave") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClientKill(ipPort string) *StatusCmd { - cmd := NewStatusCmd("client", "kill", ipPort) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClientList() *StringCmd { - cmd := NewStringCmd("client", "list") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClientPause(dur time.Duration) *BoolCmd { - cmd := NewBoolCmd("client", "pause", formatMs(dur)) - c.process(cmd) - return cmd -} - -// ClientSetName assigns a name to the connection. -func (c *statefulCmdable) ClientSetName(name string) *BoolCmd { - cmd := NewBoolCmd("client", "setname", name) - c.process(cmd) - return cmd -} - -// ClientGetName returns the name of the connection. -func (c *cmdable) ClientGetName() *StringCmd { - cmd := NewStringCmd("client", "getname") - c.process(cmd) - return cmd -} - -func (c *cmdable) ConfigGet(parameter string) *SliceCmd { - cmd := NewSliceCmd("config", "get", parameter) - c.process(cmd) - return cmd -} - -func (c *cmdable) ConfigResetStat() *StatusCmd { - cmd := NewStatusCmd("config", "resetstat") - c.process(cmd) - return cmd -} - -func (c *cmdable) ConfigSet(parameter, value string) *StatusCmd { - cmd := NewStatusCmd("config", "set", parameter, value) - c.process(cmd) - return cmd -} - -// Deperecated. Use DBSize instead. -func (c *cmdable) DbSize() *IntCmd { - return c.DBSize() -} - -func (c *cmdable) DBSize() *IntCmd { - cmd := NewIntCmd("dbsize") - c.process(cmd) - return cmd -} - -func (c *cmdable) FlushAll() *StatusCmd { - cmd := NewStatusCmd("flushall") - c.process(cmd) - return cmd -} - -func (c *cmdable) FlushAllAsync() *StatusCmd { - cmd := NewStatusCmd("flushall", "async") - c.process(cmd) - return cmd -} - -// Deprecated. Use FlushDB instead. -func (c *cmdable) FlushDb() *StatusCmd { - return c.FlushDB() -} - -func (c *cmdable) FlushDB() *StatusCmd { - cmd := NewStatusCmd("flushdb") - c.process(cmd) - return cmd -} - -func (c *cmdable) FlushDBAsync() *StatusCmd { - cmd := NewStatusCmd("flushdb", "async") - c.process(cmd) - return cmd -} - -func (c *cmdable) Info(section ...string) *StringCmd { - args := []interface{}{"info"} - if len(section) > 0 { - args = append(args, section[0]) - } - cmd := NewStringCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) LastSave() *IntCmd { - cmd := NewIntCmd("lastsave") - c.process(cmd) - return cmd -} - -func (c *cmdable) Save() *StatusCmd { - cmd := NewStatusCmd("save") - c.process(cmd) - return cmd -} - -func (c *cmdable) shutdown(modifier string) *StatusCmd { - var args []interface{} - if modifier == "" { - args = []interface{}{"shutdown"} - } else { - args = []interface{}{"shutdown", modifier} - } - cmd := NewStatusCmd(args...) - c.process(cmd) - if err := cmd.Err(); err != nil { - if err == io.EOF { - // Server quit as expected. - cmd.err = nil - } - } else { - // Server did not quit. String reply contains the reason. 
- cmd.err = internal.RedisError(cmd.val) - cmd.val = "" - } - return cmd -} - -func (c *cmdable) Shutdown() *StatusCmd { - return c.shutdown("") -} - -func (c *cmdable) ShutdownSave() *StatusCmd { - return c.shutdown("save") -} - -func (c *cmdable) ShutdownNoSave() *StatusCmd { - return c.shutdown("nosave") -} - -func (c *cmdable) SlaveOf(host, port string) *StatusCmd { - cmd := NewStatusCmd("slaveof", host, port) - c.process(cmd) - return cmd -} - -func (c *cmdable) SlowLog() { - panic("not implemented") -} - -func (c *cmdable) Sync() { - panic("not implemented") -} - -func (c *cmdable) Time() *TimeCmd { - cmd := NewTimeCmd("time") - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) Eval(script string, keys []string, args ...interface{}) *Cmd { - cmdArgs := make([]interface{}, 3+len(keys)+len(args)) - cmdArgs[0] = "eval" - cmdArgs[1] = script - cmdArgs[2] = len(keys) - for i, key := range keys { - cmdArgs[3+i] = key - } - pos := 3 + len(keys) - for i, arg := range args { - cmdArgs[pos+i] = arg - } - cmd := NewCmd(cmdArgs...) - c.process(cmd) - return cmd -} - -func (c *cmdable) EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd { - cmdArgs := make([]interface{}, 3+len(keys)+len(args)) - cmdArgs[0] = "evalsha" - cmdArgs[1] = sha1 - cmdArgs[2] = len(keys) - for i, key := range keys { - cmdArgs[3+i] = key - } - pos := 3 + len(keys) - for i, arg := range args { - cmdArgs[pos+i] = arg - } - cmd := NewCmd(cmdArgs...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ScriptExists(scripts ...string) *BoolSliceCmd { - args := make([]interface{}, 2+len(scripts)) - args[0] = "script" - args[1] = "exists" - for i, script := range scripts { - args[2+i] = script - } - cmd := NewBoolSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ScriptFlush() *StatusCmd { - cmd := NewStatusCmd("script", "flush") - c.process(cmd) - return cmd -} - -func (c *cmdable) ScriptKill() *StatusCmd { - cmd := NewStatusCmd("script", "kill") - c.process(cmd) - return cmd -} - -func (c *cmdable) ScriptLoad(script string) *StringCmd { - cmd := NewStringCmd("script", "load", script) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) DebugObject(key string) *StringCmd { - cmd := NewStringCmd("debug", "object", key) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -// Publish posts the message to the channel. -func (c *cmdable) Publish(channel string, message interface{}) *IntCmd { - cmd := NewIntCmd("publish", channel, message) - c.process(cmd) - return cmd -} - -func (c *cmdable) PubSubChannels(pattern string) *StringSliceCmd { - args := []interface{}{"pubsub", "channels"} - if pattern != "*" { - args = append(args, pattern) - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) PubSubNumSub(channels ...string) *StringIntMapCmd { - args := make([]interface{}, 2+len(channels)) - args[0] = "pubsub" - args[1] = "numsub" - for i, channel := range channels { - args[2+i] = channel - } - cmd := NewStringIntMapCmd(args...) 
- c.process(cmd) - return cmd -} - -func (c *cmdable) PubSubNumPat() *IntCmd { - cmd := NewIntCmd("pubsub", "numpat") - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) ClusterSlots() *ClusterSlotsCmd { - cmd := NewClusterSlotsCmd("cluster", "slots") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterNodes() *StringCmd { - cmd := NewStringCmd("cluster", "nodes") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterMeet(host, port string) *StatusCmd { - cmd := NewStatusCmd("cluster", "meet", host, port) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterForget(nodeID string) *StatusCmd { - cmd := NewStatusCmd("cluster", "forget", nodeID) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterReplicate(nodeID string) *StatusCmd { - cmd := NewStatusCmd("cluster", "replicate", nodeID) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterResetSoft() *StatusCmd { - cmd := NewStatusCmd("cluster", "reset", "soft") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterResetHard() *StatusCmd { - cmd := NewStatusCmd("cluster", "reset", "hard") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterInfo() *StringCmd { - cmd := NewStringCmd("cluster", "info") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterKeySlot(key string) *IntCmd { - cmd := NewIntCmd("cluster", "keyslot", key) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterCountFailureReports(nodeID string) *IntCmd { - cmd := NewIntCmd("cluster", "count-failure-reports", nodeID) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterCountKeysInSlot(slot int) *IntCmd { - cmd := NewIntCmd("cluster", "countkeysinslot", slot) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterDelSlots(slots ...int) *StatusCmd { - args := make([]interface{}, 2+len(slots)) - args[0] = "cluster" - args[1] = "delslots" - for i, slot := range slots { - args[2+i] = slot - } - cmd := NewStatusCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterDelSlotsRange(min, max int) *StatusCmd { - size := max - min + 1 - slots := make([]int, size) - for i := 0; i < size; i++ { - slots[i] = min + i - } - return c.ClusterDelSlots(slots...) -} - -func (c *cmdable) ClusterSaveConfig() *StatusCmd { - cmd := NewStatusCmd("cluster", "saveconfig") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterSlaves(nodeID string) *StringSliceCmd { - cmd := NewStringSliceCmd("cluster", "slaves", nodeID) - c.process(cmd) - return cmd -} - -func (c *statefulCmdable) ReadOnly() *StatusCmd { - cmd := NewStatusCmd("readonly") - c.process(cmd) - return cmd -} - -func (c *statefulCmdable) ReadWrite() *StatusCmd { - cmd := NewStatusCmd("readwrite") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterFailover() *StatusCmd { - cmd := NewStatusCmd("cluster", "failover") - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterAddSlots(slots ...int) *StatusCmd { - args := make([]interface{}, 2+len(slots)) - args[0] = "cluster" - args[1] = "addslots" - for i, num := range slots { - args[2+i] = num - } - cmd := NewStatusCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) ClusterAddSlotsRange(min, max int) *StatusCmd { - size := max - min + 1 - slots := make([]int, size) - for i := 0; i < size; i++ { - slots[i] = min + i - } - return c.ClusterAddSlots(slots...) 
-} - -//------------------------------------------------------------------------------ - -func (c *cmdable) GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd { - args := make([]interface{}, 2+3*len(geoLocation)) - args[0] = "geoadd" - args[1] = key - for i, eachLoc := range geoLocation { - args[2+3*i] = eachLoc.Longitude - args[2+3*i+1] = eachLoc.Latitude - args[2+3*i+2] = eachLoc.Name - } - cmd := NewIntCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd { - cmd := NewGeoLocationCmd(query, "georadius", key, longitude, latitude) - c.process(cmd) - return cmd -} - -func (c *cmdable) GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd { - cmd := NewGeoLocationCmd(query, "georadius_ro", key, longitude, latitude) - c.process(cmd) - return cmd -} - -func (c *cmdable) GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd { - cmd := NewGeoLocationCmd(query, "georadiusbymember", key, member) - c.process(cmd) - return cmd -} - -func (c *cmdable) GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd { - cmd := NewGeoLocationCmd(query, "georadiusbymember_ro", key, member) - c.process(cmd) - return cmd -} - -func (c *cmdable) GeoDist(key string, member1, member2, unit string) *FloatCmd { - if unit == "" { - unit = "km" - } - cmd := NewFloatCmd("geodist", key, member1, member2, unit) - c.process(cmd) - return cmd -} - -func (c *cmdable) GeoHash(key string, members ...string) *StringSliceCmd { - args := make([]interface{}, 2+len(members)) - args[0] = "geohash" - args[1] = key - for i, member := range members { - args[2+i] = member - } - cmd := NewStringSliceCmd(args...) - c.process(cmd) - return cmd -} - -func (c *cmdable) GeoPos(key string, members ...string) *GeoPosCmd { - args := make([]interface{}, 2+len(members)) - args[0] = "geopos" - args[1] = key - for i, member := range members { - args[2+i] = member - } - cmd := NewGeoPosCmd(args...) - c.process(cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c *cmdable) Command() *CommandsInfoCmd { - cmd := NewCommandsInfoCmd("command") - c.process(cmd) - return cmd -} diff --git a/vendor/github.com/go-redis/redis/doc.go b/vendor/github.com/go-redis/redis/doc.go deleted file mode 100644 index 55262533a6..0000000000 --- a/vendor/github.com/go-redis/redis/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package redis implements a Redis client. -*/ -package redis diff --git a/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go b/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go deleted file mode 100644 index a9c56f0762..0000000000 --- a/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package consistenthash provides an implementation of a ring hash. -package consistenthash - -import ( - "hash/crc32" - "sort" - "strconv" -) - -type Hash func(data []byte) uint32 - -type Map struct { - hash Hash - replicas int - keys []int // Sorted - hashMap map[int]string -} - -func New(replicas int, fn Hash) *Map { - m := &Map{ - replicas: replicas, - hash: fn, - hashMap: make(map[int]string), - } - if m.hash == nil { - m.hash = crc32.ChecksumIEEE - } - return m -} - -// Returns true if there are no items available. -func (m *Map) IsEmpty() bool { - return len(m.keys) == 0 -} - -// Adds some keys to the hash. -func (m *Map) Add(keys ...string) { - for _, key := range keys { - for i := 0; i < m.replicas; i++ { - hash := int(m.hash([]byte(strconv.Itoa(i) + key))) - m.keys = append(m.keys, hash) - m.hashMap[hash] = key - } - } - sort.Ints(m.keys) -} - -// Gets the closest item in the hash to the provided key. -func (m *Map) Get(key string) string { - if m.IsEmpty() { - return "" - } - - hash := int(m.hash([]byte(key))) - - // Binary search for appropriate replica. - idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash }) - - // Means we have cycled back to the first replica. - if idx == len(m.keys) { - idx = 0 - } - - return m.hashMap[m.keys[idx]] -} diff --git a/vendor/github.com/go-redis/redis/internal/error.go b/vendor/github.com/go-redis/redis/internal/error.go deleted file mode 100644 index 0898eeb622..0000000000 --- a/vendor/github.com/go-redis/redis/internal/error.go +++ /dev/null @@ -1,84 +0,0 @@ -package internal - -import ( - "io" - "net" - "strings" -) - -const Nil = RedisError("redis: nil") - -type RedisError string - -func (e RedisError) Error() string { return string(e) } - -func IsRetryableError(err error, retryNetError bool) bool { - if IsNetworkError(err) { - return retryNetError - } - s := err.Error() - if s == "ERR max number of clients reached" { - return true - } - if strings.HasPrefix(s, "LOADING ") { - return true - } - if strings.HasPrefix(s, "CLUSTERDOWN ") { - return true - } - return false -} - -func IsRedisError(err error) bool { - _, ok := err.(RedisError) - return ok -} - -func IsNetworkError(err error) bool { - if err == io.EOF { - return true - } - _, ok := err.(net.Error) - return ok -} - -func IsBadConn(err error, allowTimeout bool) bool { - if err == nil { - return false - } - if IsRedisError(err) { - return false - } - if allowTimeout { - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - return false - } - } - return true -} - -func IsMovedError(err error) (moved bool, ask bool, addr string) { - if !IsRedisError(err) { - return - } - - s := err.Error() - if strings.HasPrefix(s, "MOVED ") { - moved = true - } else if strings.HasPrefix(s, "ASK ") { - ask = true - } else { - return - } - - ind := strings.LastIndex(s, " ") - if ind == -1 { - return false, false, "" - } - addr = s[ind+1:] - return -} - -func IsLoadingError(err error) bool { - return strings.HasPrefix(err.Error(), "LOADING ") -} diff --git a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go deleted file mode 100644 index 8c7ebbfa64..0000000000 --- a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go +++ /dev/null @@ -1,77 +0,0 @@ -package hashtag - -import ( - "math/rand" - "strings" -) - -const SlotNumber = 16384 - -// CRC16 implementation according to CCITT standards. -// Copyright 2001-2010 Georges Menie (www.menie.org) -// Copyright 2013 The Go Authors. 
All rights reserved. -// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c -var crc16tab = [256]uint16{ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -} - -func Key(key string) string { - if s := strings.IndexByte(key, '{'); s > -1 { - if e := strings.IndexByte(key[s+1:], '}'); e > 0 { - return key[s+1 : s+e+1] - } - } - return key -} - -func RandomSlot() int { - return rand.Intn(SlotNumber) -} - -// hashSlot returns a consistent slot number between 0 and 16383 -// for any given string key. 
-func Slot(key string) int { - if key == "" { - return RandomSlot() - } - key = Key(key) - return int(crc16sum(key)) % SlotNumber -} - -func crc16sum(key string) (crc uint16) { - for i := 0; i < len(key); i++ { - crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff] - } - return -} diff --git a/vendor/github.com/go-redis/redis/internal/internal.go b/vendor/github.com/go-redis/redis/internal/internal.go deleted file mode 100644 index ad3fc3c9ff..0000000000 --- a/vendor/github.com/go-redis/redis/internal/internal.go +++ /dev/null @@ -1,24 +0,0 @@ -package internal - -import ( - "math/rand" - "time" -) - -// Retry backoff with jitter sleep to prevent overloaded conditions during intervals -// https://www.awsarchitectureblog.com/2015/03/backoff.html -func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration { - if retry < 0 { - retry = 0 - } - - backoff := minBackoff << uint(retry) - if backoff > maxBackoff || backoff < minBackoff { - backoff = maxBackoff - } - - if backoff == 0 { - return 0 - } - return time.Duration(rand.Int63n(int64(backoff))) -} diff --git a/vendor/github.com/go-redis/redis/internal/log.go b/vendor/github.com/go-redis/redis/internal/log.go deleted file mode 100644 index fd14222eee..0000000000 --- a/vendor/github.com/go-redis/redis/internal/log.go +++ /dev/null @@ -1,15 +0,0 @@ -package internal - -import ( - "fmt" - "log" -) - -var Logger *log.Logger - -func Logf(s string, args ...interface{}) { - if Logger == nil { - return - } - Logger.Output(2, fmt.Sprintf(s, args...)) -} diff --git a/vendor/github.com/go-redis/redis/internal/once.go b/vendor/github.com/go-redis/redis/internal/once.go deleted file mode 100644 index 64f46272ae..0000000000 --- a/vendor/github.com/go-redis/redis/internal/once.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2014 The Camlistore Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "sync" - "sync/atomic" -) - -// A Once will perform a successful action exactly once. -// -// Unlike a sync.Once, this Once's func returns an error -// and is re-armed on failure. -type Once struct { - m sync.Mutex - done uint32 -} - -// Do calls the function f if and only if Do has not been invoked -// without error for this instance of Once. In other words, given -// var once Once -// if once.Do(f) is called multiple times, only the first call will -// invoke f, even if f has a different value in each invocation unless -// f returns an error. A new instance of Once is required for each -// function to execute. -// -// Do is intended for initialization that must be run exactly once. Since f -// is niladic, it may be necessary to use a function literal to capture the -// arguments to a function to be invoked by Do: -// err := config.once.Do(func() error { return config.init(filename) }) -func (o *Once) Do(f func() error) error { - if atomic.LoadUint32(&o.done) == 1 { - return nil - } - // Slow-path. 
- o.m.Lock() - defer o.m.Unlock() - var err error - if o.done == 0 { - err = f() - if err == nil { - atomic.StoreUint32(&o.done, 1) - } - } - return err -} diff --git a/vendor/github.com/go-redis/redis/internal/pool/conn.go b/vendor/github.com/go-redis/redis/internal/pool/conn.go deleted file mode 100644 index 8af51d9de6..0000000000 --- a/vendor/github.com/go-redis/redis/internal/pool/conn.go +++ /dev/null @@ -1,78 +0,0 @@ -package pool - -import ( - "net" - "sync/atomic" - "time" - - "github.com/go-redis/redis/internal/proto" -) - -var noDeadline = time.Time{} - -type Conn struct { - netConn net.Conn - - Rd *proto.Reader - Wb *proto.WriteBuffer - - Inited bool - usedAt atomic.Value -} - -func NewConn(netConn net.Conn) *Conn { - cn := &Conn{ - netConn: netConn, - Wb: proto.NewWriteBuffer(), - } - cn.Rd = proto.NewReader(cn.netConn) - cn.SetUsedAt(time.Now()) - return cn -} - -func (cn *Conn) UsedAt() time.Time { - return cn.usedAt.Load().(time.Time) -} - -func (cn *Conn) SetUsedAt(tm time.Time) { - cn.usedAt.Store(tm) -} - -func (cn *Conn) SetNetConn(netConn net.Conn) { - cn.netConn = netConn - cn.Rd.Reset(netConn) -} - -func (cn *Conn) IsStale(timeout time.Duration) bool { - return timeout > 0 && time.Since(cn.UsedAt()) > timeout -} - -func (cn *Conn) SetReadTimeout(timeout time.Duration) error { - now := time.Now() - cn.SetUsedAt(now) - if timeout > 0 { - return cn.netConn.SetReadDeadline(now.Add(timeout)) - } - return cn.netConn.SetReadDeadline(noDeadline) -} - -func (cn *Conn) SetWriteTimeout(timeout time.Duration) error { - now := time.Now() - cn.SetUsedAt(now) - if timeout > 0 { - return cn.netConn.SetWriteDeadline(now.Add(timeout)) - } - return cn.netConn.SetWriteDeadline(noDeadline) -} - -func (cn *Conn) Write(b []byte) (int, error) { - return cn.netConn.Write(b) -} - -func (cn *Conn) RemoteAddr() net.Addr { - return cn.netConn.RemoteAddr() -} - -func (cn *Conn) Close() error { - return cn.netConn.Close() -} diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool.go b/vendor/github.com/go-redis/redis/internal/pool/pool.go deleted file mode 100644 index ae81905ea8..0000000000 --- a/vendor/github.com/go-redis/redis/internal/pool/pool.go +++ /dev/null @@ -1,377 +0,0 @@ -package pool - -import ( - "errors" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/go-redis/redis/internal" -) - -var ErrClosed = errors.New("redis: client is closed") -var ErrPoolTimeout = errors.New("redis: connection pool timeout") - -var timers = sync.Pool{ - New: func() interface{} { - t := time.NewTimer(time.Hour) - t.Stop() - return t - }, -} - -// Stats contains pool state information and accumulated stats. 
-type Stats struct { - Hits uint32 // number of times free connection was found in the pool - Misses uint32 // number of times free connection was NOT found in the pool - Timeouts uint32 // number of times a wait timeout occurred - - TotalConns uint32 // number of total connections in the pool - FreeConns uint32 // number of free connections in the pool - StaleConns uint32 // number of stale connections removed from the pool -} - -type Pooler interface { - NewConn() (*Conn, error) - CloseConn(*Conn) error - - Get() (*Conn, bool, error) - Put(*Conn) error - Remove(*Conn) error - - Len() int - FreeLen() int - Stats() *Stats - - Close() error -} - -type Options struct { - Dialer func() (net.Conn, error) - OnClose func(*Conn) error - - PoolSize int - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration -} - -type ConnPool struct { - opt *Options - - dialErrorsNum uint32 // atomic - - lastDialError error - lastDialErrorMu sync.RWMutex - - queue chan struct{} - - connsMu sync.Mutex - conns []*Conn - - freeConnsMu sync.Mutex - freeConns []*Conn - - stats Stats - - _closed uint32 // atomic -} - -var _ Pooler = (*ConnPool)(nil) - -func NewConnPool(opt *Options) *ConnPool { - p := &ConnPool{ - opt: opt, - - queue: make(chan struct{}, opt.PoolSize), - conns: make([]*Conn, 0, opt.PoolSize), - freeConns: make([]*Conn, 0, opt.PoolSize), - } - if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 { - go p.reaper(opt.IdleCheckFrequency) - } - return p -} - -func (p *ConnPool) NewConn() (*Conn, error) { - if p.closed() { - return nil, ErrClosed - } - - if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) { - return nil, p.getLastDialError() - } - - netConn, err := p.opt.Dialer() - if err != nil { - p.setLastDialError(err) - if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) { - go p.tryDial() - } - return nil, err - } - - cn := NewConn(netConn) - p.connsMu.Lock() - p.conns = append(p.conns, cn) - p.connsMu.Unlock() - - return cn, nil -} - -func (p *ConnPool) tryDial() { - for { - if p.closed() { - return - } - - conn, err := p.opt.Dialer() - if err != nil { - p.setLastDialError(err) - time.Sleep(time.Second) - continue - } - - atomic.StoreUint32(&p.dialErrorsNum, 0) - _ = conn.Close() - return - } -} - -func (p *ConnPool) setLastDialError(err error) { - p.lastDialErrorMu.Lock() - p.lastDialError = err - p.lastDialErrorMu.Unlock() -} - -func (p *ConnPool) getLastDialError() error { - p.lastDialErrorMu.RLock() - err := p.lastDialError - p.lastDialErrorMu.RUnlock() - return err -} - -// Get returns existed connection from the pool or creates a new one. 
-func (p *ConnPool) Get() (*Conn, bool, error) { - if p.closed() { - return nil, false, ErrClosed - } - - select { - case p.queue <- struct{}{}: - default: - timer := timers.Get().(*time.Timer) - timer.Reset(p.opt.PoolTimeout) - - select { - case p.queue <- struct{}{}: - if !timer.Stop() { - <-timer.C - } - timers.Put(timer) - case <-timer.C: - timers.Put(timer) - atomic.AddUint32(&p.stats.Timeouts, 1) - return nil, false, ErrPoolTimeout - } - } - - for { - p.freeConnsMu.Lock() - cn := p.popFree() - p.freeConnsMu.Unlock() - - if cn == nil { - break - } - - if cn.IsStale(p.opt.IdleTimeout) { - p.CloseConn(cn) - continue - } - - atomic.AddUint32(&p.stats.Hits, 1) - return cn, false, nil - } - - atomic.AddUint32(&p.stats.Misses, 1) - - newcn, err := p.NewConn() - if err != nil { - <-p.queue - return nil, false, err - } - - return newcn, true, nil -} - -func (p *ConnPool) popFree() *Conn { - if len(p.freeConns) == 0 { - return nil - } - - idx := len(p.freeConns) - 1 - cn := p.freeConns[idx] - p.freeConns = p.freeConns[:idx] - return cn -} - -func (p *ConnPool) Put(cn *Conn) error { - if data := cn.Rd.PeekBuffered(); data != nil { - internal.Logf("connection has unread data: %q", data) - return p.Remove(cn) - } - p.freeConnsMu.Lock() - p.freeConns = append(p.freeConns, cn) - p.freeConnsMu.Unlock() - <-p.queue - return nil -} - -func (p *ConnPool) Remove(cn *Conn) error { - _ = p.CloseConn(cn) - <-p.queue - return nil -} - -func (p *ConnPool) CloseConn(cn *Conn) error { - p.connsMu.Lock() - for i, c := range p.conns { - if c == cn { - p.conns = append(p.conns[:i], p.conns[i+1:]...) - break - } - } - p.connsMu.Unlock() - - return p.closeConn(cn) -} - -func (p *ConnPool) closeConn(cn *Conn) error { - if p.opt.OnClose != nil { - _ = p.opt.OnClose(cn) - } - return cn.Close() -} - -// Len returns total number of connections. -func (p *ConnPool) Len() int { - p.connsMu.Lock() - l := len(p.conns) - p.connsMu.Unlock() - return l -} - -// FreeLen returns number of free connections. -func (p *ConnPool) FreeLen() int { - p.freeConnsMu.Lock() - l := len(p.freeConns) - p.freeConnsMu.Unlock() - return l -} - -func (p *ConnPool) Stats() *Stats { - return &Stats{ - Hits: atomic.LoadUint32(&p.stats.Hits), - Misses: atomic.LoadUint32(&p.stats.Misses), - Timeouts: atomic.LoadUint32(&p.stats.Timeouts), - - TotalConns: uint32(p.Len()), - FreeConns: uint32(p.FreeLen()), - StaleConns: atomic.LoadUint32(&p.stats.StaleConns), - } -} - -func (p *ConnPool) closed() bool { - return atomic.LoadUint32(&p._closed) == 1 -} - -func (p *ConnPool) Filter(fn func(*Conn) bool) error { - var firstErr error - p.connsMu.Lock() - for _, cn := range p.conns { - if fn(cn) { - if err := p.closeConn(cn); err != nil && firstErr == nil { - firstErr = err - } - } - } - p.connsMu.Unlock() - return firstErr -} - -func (p *ConnPool) Close() error { - if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) { - return ErrClosed - } - - var firstErr error - p.connsMu.Lock() - for _, cn := range p.conns { - if err := p.closeConn(cn); err != nil && firstErr == nil { - firstErr = err - } - } - p.conns = nil - p.connsMu.Unlock() - - p.freeConnsMu.Lock() - p.freeConns = nil - p.freeConnsMu.Unlock() - - return firstErr -} - -func (p *ConnPool) reapStaleConn() bool { - if len(p.freeConns) == 0 { - return false - } - - cn := p.freeConns[0] - if !cn.IsStale(p.opt.IdleTimeout) { - return false - } - - p.CloseConn(cn) - p.freeConns = append(p.freeConns[:0], p.freeConns[1:]...) 
- - return true -} - -func (p *ConnPool) ReapStaleConns() (int, error) { - var n int - for { - p.queue <- struct{}{} - p.freeConnsMu.Lock() - - reaped := p.reapStaleConn() - - p.freeConnsMu.Unlock() - <-p.queue - - if reaped { - n++ - } else { - break - } - } - return n, nil -} - -func (p *ConnPool) reaper(frequency time.Duration) { - ticker := time.NewTicker(frequency) - defer ticker.Stop() - - for range ticker.C { - if p.closed() { - break - } - n, err := p.ReapStaleConns() - if err != nil { - internal.Logf("ReapStaleConns failed: %s", err) - continue - } - atomic.AddUint32(&p.stats.StaleConns, uint32(n)) - } -} diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/internal/pool/pool_single.go deleted file mode 100644 index ff91279b36..0000000000 --- a/vendor/github.com/go-redis/redis/internal/pool/pool_single.go +++ /dev/null @@ -1,55 +0,0 @@ -package pool - -type SingleConnPool struct { - cn *Conn -} - -var _ Pooler = (*SingleConnPool)(nil) - -func NewSingleConnPool(cn *Conn) *SingleConnPool { - return &SingleConnPool{ - cn: cn, - } -} - -func (p *SingleConnPool) NewConn() (*Conn, error) { - panic("not implemented") -} - -func (p *SingleConnPool) CloseConn(*Conn) error { - panic("not implemented") -} - -func (p *SingleConnPool) Get() (*Conn, bool, error) { - return p.cn, false, nil -} - -func (p *SingleConnPool) Put(cn *Conn) error { - if p.cn != cn { - panic("p.cn != cn") - } - return nil -} - -func (p *SingleConnPool) Remove(cn *Conn) error { - if p.cn != cn { - panic("p.cn != cn") - } - return nil -} - -func (p *SingleConnPool) Len() int { - return 1 -} - -func (p *SingleConnPool) FreeLen() int { - return 0 -} - -func (p *SingleConnPool) Stats() *Stats { - return nil -} - -func (p *SingleConnPool) Close() error { - return nil -} diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go deleted file mode 100644 index 17f163858b..0000000000 --- a/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go +++ /dev/null @@ -1,123 +0,0 @@ -package pool - -import "sync" - -type StickyConnPool struct { - pool *ConnPool - reusable bool - - cn *Conn - closed bool - mu sync.Mutex -} - -var _ Pooler = (*StickyConnPool)(nil) - -func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool { - return &StickyConnPool{ - pool: pool, - reusable: reusable, - } -} - -func (p *StickyConnPool) NewConn() (*Conn, error) { - panic("not implemented") -} - -func (p *StickyConnPool) CloseConn(*Conn) error { - panic("not implemented") -} - -func (p *StickyConnPool) Get() (*Conn, bool, error) { - p.mu.Lock() - defer p.mu.Unlock() - - if p.closed { - return nil, false, ErrClosed - } - if p.cn != nil { - return p.cn, false, nil - } - - cn, _, err := p.pool.Get() - if err != nil { - return nil, false, err - } - p.cn = cn - return cn, true, nil -} - -func (p *StickyConnPool) putUpstream() (err error) { - err = p.pool.Put(p.cn) - p.cn = nil - return err -} - -func (p *StickyConnPool) Put(cn *Conn) error { - p.mu.Lock() - defer p.mu.Unlock() - - if p.closed { - return ErrClosed - } - return nil -} - -func (p *StickyConnPool) removeUpstream() error { - err := p.pool.Remove(p.cn) - p.cn = nil - return err -} - -func (p *StickyConnPool) Remove(cn *Conn) error { - p.mu.Lock() - defer p.mu.Unlock() - - if p.closed { - return nil - } - return p.removeUpstream() -} - -func (p *StickyConnPool) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - - if p.cn == nil { - return 0 - } - 
return 1 -} - -func (p *StickyConnPool) FreeLen() int { - p.mu.Lock() - defer p.mu.Unlock() - - if p.cn == nil { - return 1 - } - return 0 -} - -func (p *StickyConnPool) Stats() *Stats { - return nil -} - -func (p *StickyConnPool) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - - if p.closed { - return ErrClosed - } - p.closed = true - var err error - if p.cn != nil { - if p.reusable { - err = p.putUpstream() - } else { - err = p.removeUpstream() - } - } - return err -} diff --git a/vendor/github.com/go-redis/redis/internal/proto/reader.go b/vendor/github.com/go-redis/redis/internal/proto/reader.go deleted file mode 100644 index cd94329d8d..0000000000 --- a/vendor/github.com/go-redis/redis/internal/proto/reader.go +++ /dev/null @@ -1,334 +0,0 @@ -package proto - -import ( - "bufio" - "fmt" - "io" - "strconv" - - "github.com/go-redis/redis/internal" -) - -const bytesAllocLimit = 1024 * 1024 // 1mb - -const ( - ErrorReply = '-' - StatusReply = '+' - IntReply = ':' - StringReply = '$' - ArrayReply = '*' -) - -type MultiBulkParse func(*Reader, int64) (interface{}, error) - -type Reader struct { - src *bufio.Reader - buf []byte -} - -func NewReader(rd io.Reader) *Reader { - return &Reader{ - src: bufio.NewReader(rd), - buf: make([]byte, 4096), - } -} - -func (r *Reader) Reset(rd io.Reader) { - r.src.Reset(rd) -} - -func (p *Reader) PeekBuffered() []byte { - if n := p.src.Buffered(); n != 0 { - b, _ := p.src.Peek(n) - return b - } - return nil -} - -func (p *Reader) ReadN(n int) ([]byte, error) { - b, err := readN(p.src, p.buf, n) - if err != nil { - return nil, err - } - p.buf = b - return b, nil -} - -func (p *Reader) ReadLine() ([]byte, error) { - line, isPrefix, err := p.src.ReadLine() - if err != nil { - return nil, err - } - if isPrefix { - return nil, bufio.ErrBufferFull - } - if len(line) == 0 { - return nil, fmt.Errorf("redis: reply is empty") - } - if isNilReply(line) { - return nil, internal.Nil - } - return line, nil -} - -func (p *Reader) ReadReply(m MultiBulkParse) (interface{}, error) { - line, err := p.ReadLine() - if err != nil { - return nil, err - } - - switch line[0] { - case ErrorReply: - return nil, ParseErrorReply(line) - case StatusReply: - return parseStatusValue(line), nil - case IntReply: - return parseInt(line[1:], 10, 64) - case StringReply: - return p.readTmpBytesValue(line) - case ArrayReply: - n, err := parseArrayLen(line) - if err != nil { - return nil, err - } - return m(p, n) - } - return nil, fmt.Errorf("redis: can't parse %.100q", line) -} - -func (p *Reader) ReadIntReply() (int64, error) { - line, err := p.ReadLine() - if err != nil { - return 0, err - } - switch line[0] { - case ErrorReply: - return 0, ParseErrorReply(line) - case IntReply: - return parseInt(line[1:], 10, 64) - default: - return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line) - } -} - -func (p *Reader) ReadTmpBytesReply() ([]byte, error) { - line, err := p.ReadLine() - if err != nil { - return nil, err - } - switch line[0] { - case ErrorReply: - return nil, ParseErrorReply(line) - case StringReply: - return p.readTmpBytesValue(line) - case StatusReply: - return parseStatusValue(line), nil - default: - return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line) - } -} - -func (r *Reader) ReadBytesReply() ([]byte, error) { - b, err := r.ReadTmpBytesReply() - if err != nil { - return nil, err - } - cp := make([]byte, len(b)) - copy(cp, b) - return cp, nil -} - -func (p *Reader) ReadStringReply() (string, error) { - b, err := p.ReadTmpBytesReply() - if err != nil { 
- return "", err - } - return string(b), nil -} - -func (p *Reader) ReadFloatReply() (float64, error) { - b, err := p.ReadTmpBytesReply() - if err != nil { - return 0, err - } - return parseFloat(b, 64) -} - -func (p *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) { - line, err := p.ReadLine() - if err != nil { - return nil, err - } - switch line[0] { - case ErrorReply: - return nil, ParseErrorReply(line) - case ArrayReply: - n, err := parseArrayLen(line) - if err != nil { - return nil, err - } - return m(p, n) - default: - return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line) - } -} - -func (p *Reader) ReadArrayLen() (int64, error) { - line, err := p.ReadLine() - if err != nil { - return 0, err - } - switch line[0] { - case ErrorReply: - return 0, ParseErrorReply(line) - case ArrayReply: - return parseArrayLen(line) - default: - return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line) - } -} - -func (p *Reader) ReadScanReply() ([]string, uint64, error) { - n, err := p.ReadArrayLen() - if err != nil { - return nil, 0, err - } - if n != 2 { - return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) - } - - cursor, err := p.ReadUint() - if err != nil { - return nil, 0, err - } - - n, err = p.ReadArrayLen() - if err != nil { - return nil, 0, err - } - - keys := make([]string, n) - for i := int64(0); i < n; i++ { - key, err := p.ReadStringReply() - if err != nil { - return nil, 0, err - } - keys[i] = key - } - - return keys, cursor, err -} - -func (p *Reader) readTmpBytesValue(line []byte) ([]byte, error) { - if isNilReply(line) { - return nil, internal.Nil - } - - replyLen, err := strconv.Atoi(string(line[1:])) - if err != nil { - return nil, err - } - - b, err := p.ReadN(replyLen + 2) - if err != nil { - return nil, err - } - return b[:replyLen], nil -} - -func (r *Reader) ReadInt() (int64, error) { - b, err := r.ReadTmpBytesReply() - if err != nil { - return 0, err - } - return parseInt(b, 10, 64) -} - -func (r *Reader) ReadUint() (uint64, error) { - b, err := r.ReadTmpBytesReply() - if err != nil { - return 0, err - } - return parseUint(b, 10, 64) -} - -// -------------------------------------------------------------------- - -func readN(r io.Reader, b []byte, n int) ([]byte, error) { - if n == 0 && b == nil { - return make([]byte, 0), nil - } - - if cap(b) >= n { - b = b[:n] - _, err := io.ReadFull(r, b) - return b, err - } - b = b[:cap(b)] - - pos := 0 - for pos < n { - diff := n - len(b) - if diff > bytesAllocLimit { - diff = bytesAllocLimit - } - b = append(b, make([]byte, diff)...) 
- - nn, err := io.ReadFull(r, b[pos:]) - if err != nil { - return nil, err - } - pos += nn - } - - return b, nil -} - -func formatInt(n int64) string { - return strconv.FormatInt(n, 10) -} - -func formatUint(u uint64) string { - return strconv.FormatUint(u, 10) -} - -func formatFloat(f float64) string { - return strconv.FormatFloat(f, 'f', -1, 64) -} - -func isNilReply(b []byte) bool { - return len(b) == 3 && - (b[0] == StringReply || b[0] == ArrayReply) && - b[1] == '-' && b[2] == '1' -} - -func ParseErrorReply(line []byte) error { - return internal.RedisError(string(line[1:])) -} - -func parseStatusValue(line []byte) []byte { - return line[1:] -} - -func parseArrayLen(line []byte) (int64, error) { - if isNilReply(line) { - return 0, internal.Nil - } - return parseInt(line[1:], 10, 64) -} - -func atoi(b []byte) (int, error) { - return strconv.Atoi(internal.BytesToString(b)) -} - -func parseInt(b []byte, base int, bitSize int) (int64, error) { - return strconv.ParseInt(internal.BytesToString(b), base, bitSize) -} - -func parseUint(b []byte, base int, bitSize int) (uint64, error) { - return strconv.ParseUint(internal.BytesToString(b), base, bitSize) -} - -func parseFloat(b []byte, bitSize int) (float64, error) { - return strconv.ParseFloat(internal.BytesToString(b), bitSize) -} diff --git a/vendor/github.com/go-redis/redis/internal/proto/scan.go b/vendor/github.com/go-redis/redis/internal/proto/scan.go deleted file mode 100644 index 0329ffd991..0000000000 --- a/vendor/github.com/go-redis/redis/internal/proto/scan.go +++ /dev/null @@ -1,133 +0,0 @@ -package proto - -import ( - "encoding" - "fmt" - "reflect" - - "github.com/go-redis/redis/internal" -) - -func Scan(b []byte, v interface{}) error { - switch v := v.(type) { - case nil: - return fmt.Errorf("redis: Scan(nil)") - case *string: - *v = internal.BytesToString(b) - return nil - case *[]byte: - *v = b - return nil - case *int: - var err error - *v, err = atoi(b) - return err - case *int8: - n, err := parseInt(b, 10, 8) - if err != nil { - return err - } - *v = int8(n) - return nil - case *int16: - n, err := parseInt(b, 10, 16) - if err != nil { - return err - } - *v = int16(n) - return nil - case *int32: - n, err := parseInt(b, 10, 32) - if err != nil { - return err - } - *v = int32(n) - return nil - case *int64: - n, err := parseInt(b, 10, 64) - if err != nil { - return err - } - *v = n - return nil - case *uint: - n, err := parseUint(b, 10, 64) - if err != nil { - return err - } - *v = uint(n) - return nil - case *uint8: - n, err := parseUint(b, 10, 8) - if err != nil { - return err - } - *v = uint8(n) - return nil - case *uint16: - n, err := parseUint(b, 10, 16) - if err != nil { - return err - } - *v = uint16(n) - return nil - case *uint32: - n, err := parseUint(b, 10, 32) - if err != nil { - return err - } - *v = uint32(n) - return nil - case *uint64: - n, err := parseUint(b, 10, 64) - if err != nil { - return err - } - *v = n - return nil - case *float32: - n, err := parseFloat(b, 32) - if err != nil { - return err - } - *v = float32(n) - return err - case *float64: - var err error - *v, err = parseFloat(b, 64) - return err - case *bool: - *v = len(b) == 1 && b[0] == '1' - return nil - case encoding.BinaryUnmarshaler: - return v.UnmarshalBinary(b) - default: - return fmt.Errorf( - "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v) - } -} - -func ScanSlice(data []string, slice interface{}) error { - v := reflect.ValueOf(slice) - if !v.IsValid() { - return fmt.Errorf("redis: ScanSlice(nil)") - } - if v.Kind() 
!= reflect.Ptr { - return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice) - } - v = v.Elem() - if v.Kind() != reflect.Slice { - return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice) - } - - next := internal.MakeSliceNextElemFunc(v) - for i, s := range data { - elem := next() - if err := Scan([]byte(s), elem.Addr().Interface()); err != nil { - err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err) - return err - } - } - - return nil -} diff --git a/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go b/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go deleted file mode 100644 index 096b6d76af..0000000000 --- a/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go +++ /dev/null @@ -1,103 +0,0 @@ -package proto - -import ( - "encoding" - "fmt" - "strconv" -) - -type WriteBuffer struct { - b []byte -} - -func NewWriteBuffer() *WriteBuffer { - return &WriteBuffer{ - b: make([]byte, 0, 4096), - } -} - -func (w *WriteBuffer) Len() int { return len(w.b) } -func (w *WriteBuffer) Bytes() []byte { return w.b } -func (w *WriteBuffer) Reset() { w.b = w.b[:0] } - -func (w *WriteBuffer) Append(args []interface{}) error { - w.b = append(w.b, ArrayReply) - w.b = strconv.AppendUint(w.b, uint64(len(args)), 10) - w.b = append(w.b, '\r', '\n') - - for _, arg := range args { - if err := w.append(arg); err != nil { - return err - } - } - return nil -} - -func (w *WriteBuffer) append(val interface{}) error { - switch v := val.(type) { - case nil: - w.AppendString("") - case string: - w.AppendString(v) - case []byte: - w.AppendBytes(v) - case int: - w.AppendString(formatInt(int64(v))) - case int8: - w.AppendString(formatInt(int64(v))) - case int16: - w.AppendString(formatInt(int64(v))) - case int32: - w.AppendString(formatInt(int64(v))) - case int64: - w.AppendString(formatInt(v)) - case uint: - w.AppendString(formatUint(uint64(v))) - case uint8: - w.AppendString(formatUint(uint64(v))) - case uint16: - w.AppendString(formatUint(uint64(v))) - case uint32: - w.AppendString(formatUint(uint64(v))) - case uint64: - w.AppendString(formatUint(v)) - case float32: - w.AppendString(formatFloat(float64(v))) - case float64: - w.AppendString(formatFloat(v)) - case bool: - if v { - w.AppendString("1") - } else { - w.AppendString("0") - } - default: - if bm, ok := val.(encoding.BinaryMarshaler); ok { - bb, err := bm.MarshalBinary() - if err != nil { - return err - } - w.AppendBytes(bb) - } else { - return fmt.Errorf( - "redis: can't marshal %T (consider implementing encoding.BinaryMarshaler)", val) - } - } - return nil -} - -func (w *WriteBuffer) AppendString(s string) { - w.b = append(w.b, StringReply) - w.b = strconv.AppendUint(w.b, uint64(len(s)), 10) - w.b = append(w.b, '\r', '\n') - w.b = append(w.b, s...) - w.b = append(w.b, '\r', '\n') -} - -func (w *WriteBuffer) AppendBytes(p []byte) { - w.b = append(w.b, StringReply) - w.b = strconv.AppendUint(w.b, uint64(len(p)), 10) - w.b = append(w.b, '\r', '\n') - w.b = append(w.b, p...) 
- w.b = append(w.b, '\r', '\n') -} diff --git a/vendor/github.com/go-redis/redis/internal/safe.go b/vendor/github.com/go-redis/redis/internal/safe.go deleted file mode 100644 index dc5f4cc8a4..0000000000 --- a/vendor/github.com/go-redis/redis/internal/safe.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build appengine - -package internal - -func BytesToString(b []byte) string { - return string(b) -} diff --git a/vendor/github.com/go-redis/redis/internal/unsafe.go b/vendor/github.com/go-redis/redis/internal/unsafe.go deleted file mode 100644 index 3ae48c14b9..0000000000 --- a/vendor/github.com/go-redis/redis/internal/unsafe.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !appengine - -package internal - -import ( - "unsafe" -) - -// BytesToString converts byte slice to string. -func BytesToString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} diff --git a/vendor/github.com/go-redis/redis/internal/util.go b/vendor/github.com/go-redis/redis/internal/util.go deleted file mode 100644 index 1ba9805fe3..0000000000 --- a/vendor/github.com/go-redis/redis/internal/util.go +++ /dev/null @@ -1,62 +0,0 @@ -package internal - -import "reflect" - -func ToLower(s string) string { - if isLower(s) { - return s - } - - b := make([]byte, len(s)) - for i := range b { - c := s[i] - if c >= 'A' && c <= 'Z' { - c += 'a' - 'A' - } - b[i] = c - } - return BytesToString(b) -} - -func isLower(s string) bool { - for i := 0; i < len(s); i++ { - c := s[i] - if c >= 'A' && c <= 'Z' { - return false - } - } - return true -} - -func MakeSliceNextElemFunc(v reflect.Value) func() reflect.Value { - elemType := v.Type().Elem() - - if elemType.Kind() == reflect.Ptr { - elemType = elemType.Elem() - return func() reflect.Value { - if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Len()+1)) - elem := v.Index(v.Len() - 1) - if elem.IsNil() { - elem.Set(reflect.New(elemType)) - } - return elem.Elem() - } - - elem := reflect.New(elemType) - v.Set(reflect.Append(v, elem)) - return elem.Elem() - } - } - - zero := reflect.Zero(elemType) - return func() reflect.Value { - if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Len()+1)) - return v.Index(v.Len() - 1) - } - - v.Set(reflect.Append(v, zero)) - return v.Index(v.Len() - 1) - } -} diff --git a/vendor/github.com/go-redis/redis/iterator.go b/vendor/github.com/go-redis/redis/iterator.go deleted file mode 100644 index 5d4bedfe5d..0000000000 --- a/vendor/github.com/go-redis/redis/iterator.go +++ /dev/null @@ -1,73 +0,0 @@ -package redis - -import "sync" - -// ScanIterator is used to incrementally iterate over a collection of elements. -// It's safe for concurrent use by multiple goroutines. -type ScanIterator struct { - mu sync.Mutex // protects Scanner and pos - cmd *ScanCmd - pos int -} - -// Err returns the last iterator error, if any. -func (it *ScanIterator) Err() error { - it.mu.Lock() - err := it.cmd.Err() - it.mu.Unlock() - return err -} - -// Next advances the cursor and returns true if more values can be read. -func (it *ScanIterator) Next() bool { - it.mu.Lock() - defer it.mu.Unlock() - - // Instantly return on errors. - if it.cmd.Err() != nil { - return false - } - - // Advance cursor, check if we are still within range. - if it.pos < len(it.cmd.page) { - it.pos++ - return true - } - - for { - // Return if there is no more data to fetch. - if it.cmd.cursor == 0 { - return false - } - - // Fetch next page. 
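// (A note on the step below, assuming the standard SCAN cursor contract:
// each SCAN/HSCAN/SSCAN/ZSCAN reply carries the cursor for the next call,
// and a cursor of 0 means iteration is complete. The branch rewrites the
// cursor argument in place: plain SCAN keeps its cursor at args[1], while
// the key-based variants ("hscan key cursor ...") keep it at args[2].)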
- if it.cmd._args[0] == "scan" { - it.cmd._args[1] = it.cmd.cursor - } else { - it.cmd._args[2] = it.cmd.cursor - } - - err := it.cmd.process(it.cmd) - if err != nil { - return false - } - - it.pos = 1 - - // Redis can occasionally return empty page. - if len(it.cmd.page) > 0 { - return true - } - } -} - -// Val returns the key/field at the current cursor position. -func (it *ScanIterator) Val() string { - var v string - it.mu.Lock() - if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) { - v = it.cmd.page[it.pos-1] - } - it.mu.Unlock() - return v -} diff --git a/vendor/github.com/go-redis/redis/options.go b/vendor/github.com/go-redis/redis/options.go deleted file mode 100644 index 75648053de..0000000000 --- a/vendor/github.com/go-redis/redis/options.go +++ /dev/null @@ -1,200 +0,0 @@ -package redis - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "runtime" - "strconv" - "strings" - "time" - - "github.com/go-redis/redis/internal/pool" -) - -type Options struct { - // The network type, either tcp or unix. - // Default is tcp. - Network string - // host:port address. - Addr string - - // Dialer creates new network connection and has priority over - // Network and Addr options. - Dialer func() (net.Conn, error) - - // Hook that is called when new connection is established. - OnConnect func(*Conn) error - - // Optional password. Must match the password specified in the - // requirepass server configuration option. - Password string - // Database to be selected after connecting to the server. - DB int - - // Maximum number of retries before giving up. - // Default is to not retry failed commands. - MaxRetries int - // Minimum backoff between each retry. - // Default is 8 milliseconds; -1 disables backoff. - MinRetryBackoff time.Duration - // Maximum backoff between each retry. - // Default is 512 milliseconds; -1 disables backoff. - MaxRetryBackoff time.Duration - - // Dial timeout for establishing new connections. - // Default is 5 seconds. - DialTimeout time.Duration - // Timeout for socket reads. If reached, commands will fail - // with a timeout instead of blocking. - // Default is 3 seconds. - ReadTimeout time.Duration - // Timeout for socket writes. If reached, commands will fail - // with a timeout instead of blocking. - // Default is ReadTimeout. - WriteTimeout time.Duration - - // Maximum number of socket connections. - // Default is 10 connections per every CPU as reported by runtime.NumCPU. - PoolSize int - // Amount of time client waits for connection if all connections - // are busy before returning an error. - // Default is ReadTimeout + 1 second. - PoolTimeout time.Duration - // Amount of time after which client closes idle connections. - // Should be less than server's timeout. - // Default is 5 minutes. - IdleTimeout time.Duration - // Frequency of idle checks. - // Default is 1 minute. - // When minus value is set, then idle check is disabled. - IdleCheckFrequency time.Duration - - // Enables read only queries on slave nodes. - readOnly bool - - // TLS Config to use. When set TLS will be negotiated. 
- TLSConfig *tls.Config -} - -func (opt *Options) init() { - if opt.Network == "" { - opt.Network = "tcp" - } - if opt.Dialer == nil { - opt.Dialer = func() (net.Conn, error) { - conn, err := net.DialTimeout(opt.Network, opt.Addr, opt.DialTimeout) - if opt.TLSConfig == nil || err != nil { - return conn, err - } - t := tls.Client(conn, opt.TLSConfig) - return t, t.Handshake() - } - } - if opt.PoolSize == 0 { - opt.PoolSize = 10 * runtime.NumCPU() - } - if opt.DialTimeout == 0 { - opt.DialTimeout = 5 * time.Second - } - switch opt.ReadTimeout { - case -1: - opt.ReadTimeout = 0 - case 0: - opt.ReadTimeout = 3 * time.Second - } - switch opt.WriteTimeout { - case -1: - opt.WriteTimeout = 0 - case 0: - opt.WriteTimeout = opt.ReadTimeout - } - if opt.PoolTimeout == 0 { - opt.PoolTimeout = opt.ReadTimeout + time.Second - } - if opt.IdleTimeout == 0 { - opt.IdleTimeout = 5 * time.Minute - } - if opt.IdleCheckFrequency == 0 { - opt.IdleCheckFrequency = time.Minute - } - - switch opt.MinRetryBackoff { - case -1: - opt.MinRetryBackoff = 0 - case 0: - opt.MinRetryBackoff = 8 * time.Millisecond - } - switch opt.MaxRetryBackoff { - case -1: - opt.MaxRetryBackoff = 0 - case 0: - opt.MaxRetryBackoff = 512 * time.Millisecond - } -} - -// ParseURL parses an URL into Options that can be used to connect to Redis. -func ParseURL(redisURL string) (*Options, error) { - o := &Options{Network: "tcp"} - u, err := url.Parse(redisURL) - if err != nil { - return nil, err - } - - if u.Scheme != "redis" && u.Scheme != "rediss" { - return nil, errors.New("invalid redis URL scheme: " + u.Scheme) - } - - if u.User != nil { - if p, ok := u.User.Password(); ok { - o.Password = p - } - } - - if len(u.Query()) > 0 { - return nil, errors.New("no options supported") - } - - h, p, err := net.SplitHostPort(u.Host) - if err != nil { - h = u.Host - } - if h == "" { - h = "localhost" - } - if p == "" { - p = "6379" - } - o.Addr = net.JoinHostPort(h, p) - - f := strings.FieldsFunc(u.Path, func(r rune) bool { - return r == '/' - }) - switch len(f) { - case 0: - o.DB = 0 - case 1: - if o.DB, err = strconv.Atoi(f[0]); err != nil { - return nil, fmt.Errorf("invalid redis database number: %q", f[0]) - } - default: - return nil, errors.New("invalid redis URL path: " + u.Path) - } - - if u.Scheme == "rediss" { - o.TLSConfig = &tls.Config{ServerName: h} - } - return o, nil -} - -func newConnPool(opt *Options) *pool.ConnPool { - return pool.NewConnPool(&pool.Options{ - Dialer: opt.Dialer, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: opt.IdleCheckFrequency, - }) -} diff --git a/vendor/github.com/go-redis/redis/parser.go b/vendor/github.com/go-redis/redis/parser.go deleted file mode 100644 index b378abc4ee..0000000000 --- a/vendor/github.com/go-redis/redis/parser.go +++ /dev/null @@ -1,388 +0,0 @@ -package redis - -import ( - "fmt" - "net" - "strconv" - "time" - - "github.com/go-redis/redis/internal/proto" -) - -// Implements proto.MultiBulkParse -func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { - vals := make([]interface{}, 0, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(sliceParser) - if err == Nil { - vals = append(vals, nil) - } else if err != nil { - return nil, err - } else { - switch vv := v.(type) { - case []byte: - vals = append(vals, string(vv)) - default: - vals = append(vals, v) - } - } - } - return vals, nil -} - -// Implements proto.MultiBulkParse -func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) { - bools := 
make([]bool, 0, n) - for i := int64(0); i < n; i++ { - n, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - bools = append(bools, n == 1) - } - return bools, nil -} - -// Implements proto.MultiBulkParse -func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) { - ss := make([]string, 0, n) - for i := int64(0); i < n; i++ { - s, err := rd.ReadStringReply() - if err == Nil { - ss = append(ss, "") - } else if err != nil { - return nil, err - } else { - ss = append(ss, s) - } - } - return ss, nil -} - -// Implements proto.MultiBulkParse -func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) { - m := make(map[string]string, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadStringReply() - if err != nil { - return nil, err - } - - value, err := rd.ReadStringReply() - if err != nil { - return nil, err - } - - m[key] = value - } - return m, nil -} - -// Implements proto.MultiBulkParse -func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) { - m := make(map[string]int64, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadStringReply() - if err != nil { - return nil, err - } - - n, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - m[key] = n - } - return m, nil -} - -// Implements proto.MultiBulkParse -func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) { - m := make(map[string]struct{}, n) - for i := int64(0); i < n; i++ { - key, err := rd.ReadStringReply() - if err != nil { - return nil, err - } - - m[key] = struct{}{} - } - return m, nil -} - -// Implements proto.MultiBulkParse -func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) { - zz := make([]Z, n/2) - for i := int64(0); i < n; i += 2 { - var err error - - z := &zz[i/2] - - z.Member, err = rd.ReadStringReply() - if err != nil { - return nil, err - } - - z.Score, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - } - return zz, nil -} - -// Implements proto.MultiBulkParse -func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) { - slots := make([]ClusterSlot, n) - for i := 0; i < len(slots); i++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n < 2 { - err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) - return nil, err - } - - start, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - end, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - nodes := make([]ClusterNode, n-2) - for j := 0; j < len(nodes); j++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n != 2 && n != 3 { - err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) - return nil, err - } - - ip, err := rd.ReadStringReply() - if err != nil { - return nil, err - } - - port, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - nodes[j].Addr = net.JoinHostPort(ip, strconv.FormatInt(port, 10)) - - if n == 3 { - id, err := rd.ReadStringReply() - if err != nil { - return nil, err - } - nodes[j].Id = id - } - } - - slots[i] = ClusterSlot{ - Start: int(start), - End: int(end), - Nodes: nodes, - } - } - return slots, nil -} - -func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { - return func(rd *proto.Reader, n int64) (interface{}, error) { - var loc GeoLocation - var err error - - loc.Name, err = rd.ReadStringReply() - if err != nil { - return nil, err - } - if q.WithDist { - loc.Dist, err = rd.ReadFloatReply() - if err != nil { 
- return nil, err - } - } - if q.WithGeoHash { - loc.GeoHash, err = rd.ReadIntReply() - if err != nil { - return nil, err - } - } - if q.WithCoord { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n != 2 { - return nil, fmt.Errorf("got %d coordinates, expected 2", n) - } - - loc.Longitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - loc.Latitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - } - - return &loc, nil - } -} - -func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { - return func(rd *proto.Reader, n int64) (interface{}, error) { - locs := make([]GeoLocation, 0, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(newGeoLocationParser(q)) - if err != nil { - return nil, err - } - switch vv := v.(type) { - case []byte: - locs = append(locs, GeoLocation{ - Name: string(vv), - }) - case *GeoLocation: - locs = append(locs, *vv) - default: - return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) - } - } - return locs, nil - } -} - -func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) { - var pos GeoPos - var err error - - pos.Longitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - - pos.Latitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - - return &pos, nil -} - -func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) { - positions := make([]*GeoPos, 0, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(geoPosParser) - if err != nil { - if err == Nil { - positions = append(positions, nil) - continue - } - return nil, err - } - switch v := v.(type) { - case *GeoPos: - positions = append(positions, v) - default: - return nil, fmt.Errorf("got %T, expected *GeoPos", v) - } - } - return positions, nil -} - -func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { - var cmd CommandInfo - var err error - - if n != 6 { - return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n) - } - - cmd.Name, err = rd.ReadStringReply() - if err != nil { - return nil, err - } - - arity, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.Arity = int8(arity) - - flags, err := rd.ReadReply(stringSliceParser) - if err != nil { - return nil, err - } - cmd.Flags = flags.([]string) - - firstKeyPos, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.FirstKeyPos = int8(firstKeyPos) - - lastKeyPos, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.LastKeyPos = int8(lastKeyPos) - - stepCount, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.StepCount = int8(stepCount) - - for _, flag := range cmd.Flags { - if flag == "readonly" { - cmd.ReadOnly = true - break - } - } - - return &cmd, nil -} - -// Implements proto.MultiBulkParse -func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) { - m := make(map[string]*CommandInfo, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(commandInfoParser) - if err != nil { - return nil, err - } - vv := v.(*CommandInfo) - m[vv.Name] = vv - - } - return m, nil -} - -// Implements proto.MultiBulkParse -func timeParser(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d elements, expected 2", n) - } - - sec, err := rd.ReadInt() - if err != nil { - return nil, err - } - - microsec, err := rd.ReadInt() - if err != nil { - return nil, err - } - - return time.Unix(sec, microsec*1000), nil -} diff 
--git a/vendor/github.com/go-redis/redis/pipeline.go b/vendor/github.com/go-redis/redis/pipeline.go deleted file mode 100644 index 9349ef553e..0000000000 --- a/vendor/github.com/go-redis/redis/pipeline.go +++ /dev/null @@ -1,112 +0,0 @@ -package redis - -import ( - "sync" - - "github.com/go-redis/redis/internal/pool" -) - -type pipelineExecer func([]Cmder) error - -type Pipeliner interface { - StatefulCmdable - Process(cmd Cmder) error - Close() error - Discard() error - Exec() ([]Cmder, error) -} - -var _ Pipeliner = (*Pipeline)(nil) - -// Pipeline implements pipelining as described in -// http://redis.io/topics/pipelining. It's safe for concurrent use -// by multiple goroutines. -type Pipeline struct { - statefulCmdable - - exec pipelineExecer - - mu sync.Mutex - cmds []Cmder - closed bool -} - -func (c *Pipeline) Process(cmd Cmder) error { - c.mu.Lock() - c.cmds = append(c.cmds, cmd) - c.mu.Unlock() - return nil -} - -// Close closes the pipeline, releasing any open resources. -func (c *Pipeline) Close() error { - c.mu.Lock() - c.discard() - c.closed = true - c.mu.Unlock() - return nil -} - -// Discard resets the pipeline and discards queued commands. -func (c *Pipeline) Discard() error { - c.mu.Lock() - err := c.discard() - c.mu.Unlock() - return err -} - -func (c *Pipeline) discard() error { - if c.closed { - return pool.ErrClosed - } - c.cmds = c.cmds[:0] - return nil -} - -// Exec executes all previously queued commands using one -// client-server roundtrip. -// -// Exec always returns list of commands and error of the first failed -// command if any. -func (c *Pipeline) Exec() ([]Cmder, error) { - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil, pool.ErrClosed - } - - if len(c.cmds) == 0 { - return nil, nil - } - - cmds := c.cmds - c.cmds = nil - - return cmds, c.exec(cmds) -} - -func (c *Pipeline) pipelined(fn func(Pipeliner) error) ([]Cmder, error) { - if err := fn(c); err != nil { - return nil, err - } - cmds, err := c.Exec() - _ = c.Close() - return cmds, err -} - -func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.pipelined(fn) -} - -func (c *Pipeline) Pipeline() Pipeliner { - return c -} - -func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.pipelined(fn) -} - -func (c *Pipeline) TxPipeline() Pipeliner { - return c -} diff --git a/vendor/github.com/go-redis/redis/pubsub.go b/vendor/github.com/go-redis/redis/pubsub.go deleted file mode 100644 index 01f8a61aad..0000000000 --- a/vendor/github.com/go-redis/redis/pubsub.go +++ /dev/null @@ -1,401 +0,0 @@ -package redis - -import ( - "fmt" - "net" - "sync" - "time" - - "github.com/go-redis/redis/internal" - "github.com/go-redis/redis/internal/pool" -) - -// PubSub implements Pub/Sub commands as described in -// http://redis.io/topics/pubsub. It's NOT safe for concurrent use by -// multiple goroutines. -// -// PubSub automatically resubscribes to the channels and patterns -// when Redis becomes unavailable. 
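// A minimal receive-loop sketch, assuming the v6 API this file exposes
// (the channel name and the client construction are illustrative):
//
//	pubsub := client.Subscribe("mychannel")
//	defer pubsub.Close()
//	for {
//		msg, err := pubsub.ReceiveMessage()
//		if err != nil {
//			break
//		}
//		fmt.Println(msg.Channel, msg.Payload)
//	}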
-type PubSub struct { - opt *Options - - newConn func([]string) (*pool.Conn, error) - closeConn func(*pool.Conn) error - - mu sync.Mutex - cn *pool.Conn - channels []string - patterns []string - closed bool - - cmd *Cmd - - chOnce sync.Once - ch chan *Message -} - -func (c *PubSub) conn() (*pool.Conn, error) { - c.mu.Lock() - cn, err := c._conn(nil) - c.mu.Unlock() - return cn, err -} - -func (c *PubSub) _conn(channels []string) (*pool.Conn, error) { - if c.closed { - return nil, pool.ErrClosed - } - - if c.cn != nil { - return c.cn, nil - } - - cn, err := c.newConn(channels) - if err != nil { - return nil, err - } - - if err := c.resubscribe(cn); err != nil { - _ = c.closeConn(cn) - return nil, err - } - - c.cn = cn - return cn, nil -} - -func (c *PubSub) resubscribe(cn *pool.Conn) error { - var firstErr error - if len(c.channels) > 0 { - if err := c._subscribe(cn, "subscribe", c.channels...); err != nil && firstErr == nil { - firstErr = err - } - } - if len(c.patterns) > 0 { - if err := c._subscribe(cn, "psubscribe", c.patterns...); err != nil && firstErr == nil { - firstErr = err - } - } - return firstErr -} - -func (c *PubSub) _subscribe(cn *pool.Conn, redisCmd string, channels ...string) error { - args := make([]interface{}, 1+len(channels)) - args[0] = redisCmd - for i, channel := range channels { - args[1+i] = channel - } - cmd := NewSliceCmd(args...) - - cn.SetWriteTimeout(c.opt.WriteTimeout) - return writeCmd(cn, cmd) -} - -func (c *PubSub) releaseConn(cn *pool.Conn, err error) { - c.mu.Lock() - c._releaseConn(cn, err) - c.mu.Unlock() -} - -func (c *PubSub) _releaseConn(cn *pool.Conn, err error) { - if c.cn != cn { - return - } - if internal.IsBadConn(err, true) { - _ = c.closeTheCn() - } -} - -func (c *PubSub) closeTheCn() error { - err := c.closeConn(c.cn) - c.cn = nil - return err -} - -func (c *PubSub) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return pool.ErrClosed - } - c.closed = true - - if c.cn != nil { - return c.closeTheCn() - } - return nil -} - -// Subscribes the client to the specified channels. It returns -// empty subscription if there are no channels. -func (c *PubSub) Subscribe(channels ...string) error { - c.mu.Lock() - err := c.subscribe("subscribe", channels...) - c.channels = appendIfNotExists(c.channels, channels...) - c.mu.Unlock() - return err -} - -// Subscribes the client to the given patterns. It returns -// empty subscription if there are no patterns. -func (c *PubSub) PSubscribe(patterns ...string) error { - c.mu.Lock() - err := c.subscribe("psubscribe", patterns...) - c.patterns = appendIfNotExists(c.patterns, patterns...) - c.mu.Unlock() - return err -} - -// Unsubscribes the client from the given channels, or from all of -// them if none is given. -func (c *PubSub) Unsubscribe(channels ...string) error { - c.mu.Lock() - err := c.subscribe("unsubscribe", channels...) - c.channels = remove(c.channels, channels...) - c.mu.Unlock() - return err -} - -// Unsubscribes the client from the given patterns, or from all of -// them if none is given. -func (c *PubSub) PUnsubscribe(patterns ...string) error { - c.mu.Lock() - err := c.subscribe("punsubscribe", patterns...) - c.patterns = remove(c.patterns, patterns...) - c.mu.Unlock() - return err -} - -func (c *PubSub) subscribe(redisCmd string, channels ...string) error { - cn, err := c._conn(channels) - if err != nil { - return err - } - - err = c._subscribe(cn, redisCmd, channels...) 
- c._releaseConn(cn, err) - return err -} - -func (c *PubSub) Ping(payload ...string) error { - args := []interface{}{"ping"} - if len(payload) == 1 { - args = append(args, payload[0]) - } - cmd := NewCmd(args...) - - cn, err := c.conn() - if err != nil { - return err - } - - cn.SetWriteTimeout(c.opt.WriteTimeout) - err = writeCmd(cn, cmd) - c.releaseConn(cn, err) - return err -} - -// Message received after a successful subscription to channel. -type Subscription struct { - // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". - Kind string - // Channel name we have subscribed to. - Channel string - // Number of channels we are currently subscribed to. - Count int -} - -func (m *Subscription) String() string { - return fmt.Sprintf("%s: %s", m.Kind, m.Channel) -} - -// Message received as result of a PUBLISH command issued by another client. -type Message struct { - Channel string - Pattern string - Payload string -} - -func (m *Message) String() string { - return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) -} - -// Pong received as result of a PING command issued by another client. -type Pong struct { - Payload string -} - -func (p *Pong) String() string { - if p.Payload != "" { - return fmt.Sprintf("Pong<%s>", p.Payload) - } - return "Pong" -} - -func (c *PubSub) newMessage(reply interface{}) (interface{}, error) { - switch reply := reply.(type) { - case string: - return &Pong{ - Payload: reply, - }, nil - case []interface{}: - switch kind := reply[0].(string); kind { - case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": - return &Subscription{ - Kind: kind, - Channel: reply[1].(string), - Count: int(reply[2].(int64)), - }, nil - case "message": - return &Message{ - Channel: reply[1].(string), - Payload: reply[2].(string), - }, nil - case "pmessage": - return &Message{ - Pattern: reply[1].(string), - Channel: reply[2].(string), - Payload: reply[3].(string), - }, nil - case "pong": - return &Pong{ - Payload: reply[1].(string), - }, nil - default: - return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind) - } - default: - return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply) - } -} - -// ReceiveTimeout acts like Receive but returns an error if message -// is not received in time. This is low-level API and most clients -// should use ReceiveMessage. -func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) { - if c.cmd == nil { - c.cmd = NewCmd() - } - - cn, err := c.conn() - if err != nil { - return nil, err - } - - cn.SetReadTimeout(timeout) - err = c.cmd.readReply(cn) - c.releaseConn(cn, err) - if err != nil { - return nil, err - } - - return c.newMessage(c.cmd.Val()) -} - -// Receive returns a message as a Subscription, Message, Pong or error. -// See PubSub example for details. This is low-level API and most clients -// should use ReceiveMessage. -func (c *PubSub) Receive() (interface{}, error) { - return c.ReceiveTimeout(0) -} - -// ReceiveMessage returns a Message or error ignoring Subscription or Pong -// messages. It automatically reconnects to Redis Server and resubscribes -// to channels in case of network errors. 
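// A note on the recovery strategy, assuming the behaviour encoded in
// receiveMessage below: on a read timeout it first issues a Ping to probe
// the connection; only after three consecutive network errors does it
// sleep for a second before retrying, so a dead server does not spin the
// caller.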
-func (c *PubSub) ReceiveMessage() (*Message, error) { - return c.receiveMessage(5 * time.Second) -} - -func (c *PubSub) receiveMessage(timeout time.Duration) (*Message, error) { - var errNum uint - for { - msgi, err := c.ReceiveTimeout(timeout) - if err != nil { - if !internal.IsNetworkError(err) { - return nil, err - } - - errNum++ - if errNum < 3 { - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - err := c.Ping() - if err != nil { - internal.Logf("PubSub.Ping failed: %s", err) - } - } - } else { - // 3 consequent errors - connection is broken or - // Redis Server is down. - // Sleep to not exceed max number of open connections. - time.Sleep(time.Second) - } - continue - } - - // Reset error number, because we received a message. - errNum = 0 - - switch msg := msgi.(type) { - case *Subscription: - // Ignore. - case *Pong: - // Ignore. - case *Message: - return msg, nil - default: - return nil, fmt.Errorf("redis: unknown message: %T", msgi) - } - } -} - -// Channel returns a Go channel for concurrently receiving messages. -// The channel is closed with PubSub. Receive or ReceiveMessage APIs -// can not be used after channel is created. -func (c *PubSub) Channel() <-chan *Message { - c.chOnce.Do(func() { - c.ch = make(chan *Message, 100) - go func() { - for { - msg, err := c.ReceiveMessage() - if err != nil { - if err == pool.ErrClosed { - break - } - continue - } - c.ch <- msg - } - close(c.ch) - }() - }) - return c.ch -} - -func appendIfNotExists(ss []string, es ...string) []string { -loop: - for _, e := range es { - for _, s := range ss { - if s == e { - continue loop - } - } - ss = append(ss, e) - } - return ss -} - -func remove(ss []string, es ...string) []string { - if len(es) == 0 { - return ss[:0] - } - for _, e := range es { - for i, s := range ss { - if s == e { - ss = append(ss[:i], ss[i+1:]...) - break - } - } - } - return ss -} diff --git a/vendor/github.com/go-redis/redis/redis.go b/vendor/github.com/go-redis/redis/redis.go deleted file mode 100644 index 37ffafd97b..0000000000 --- a/vendor/github.com/go-redis/redis/redis.go +++ /dev/null @@ -1,450 +0,0 @@ -package redis - -import ( - "fmt" - "log" - "os" - "time" - - "github.com/go-redis/redis/internal" - "github.com/go-redis/redis/internal/pool" - "github.com/go-redis/redis/internal/proto" -) - -// Redis nil reply returned when key does not exist. 
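// The sentinel is compared with ==, e.g. (key name is illustrative):
//
//	val, err := client.Get("missing").Result()
//	if err == redis.Nil {
//		// key does not exist
//	} else if err != nil {
//		// a real error
//	}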
-const Nil = internal.Nil - -func init() { - SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile)) -} - -func SetLogger(logger *log.Logger) { - internal.Logger = logger -} - -func (c *baseClient) String() string { - return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) -} - -func (c *baseClient) newConn() (*pool.Conn, error) { - cn, err := c.connPool.NewConn() - if err != nil { - return nil, err - } - - if !cn.Inited { - if err := c.initConn(cn); err != nil { - _ = c.connPool.CloseConn(cn) - return nil, err - } - } - - return cn, nil -} - -func (c *baseClient) getConn() (*pool.Conn, bool, error) { - cn, isNew, err := c.connPool.Get() - if err != nil { - return nil, false, err - } - - if !cn.Inited { - if err := c.initConn(cn); err != nil { - _ = c.connPool.Remove(cn) - return nil, false, err - } - } - - return cn, isNew, nil -} - -func (c *baseClient) releaseConn(cn *pool.Conn, err error) bool { - if internal.IsBadConn(err, false) { - _ = c.connPool.Remove(cn) - return false - } - - _ = c.connPool.Put(cn) - return true -} - -func (c *baseClient) initConn(cn *pool.Conn) error { - cn.Inited = true - - if c.opt.Password == "" && - c.opt.DB == 0 && - !c.opt.readOnly && - c.opt.OnConnect == nil { - return nil - } - - // Temp client to initialize connection. - conn := &Conn{ - baseClient: baseClient{ - opt: c.opt, - connPool: pool.NewSingleConnPool(cn), - }, - } - conn.setProcessor(conn.Process) - - _, err := conn.Pipelined(func(pipe Pipeliner) error { - if c.opt.Password != "" { - pipe.Auth(c.opt.Password) - } - - if c.opt.DB > 0 { - pipe.Select(c.opt.DB) - } - - if c.opt.readOnly { - pipe.ReadOnly() - } - - return nil - }) - if err != nil { - return err - } - - if c.opt.OnConnect != nil { - return c.opt.OnConnect(conn) - } - return nil -} - -// WrapProcess replaces the process func. It takes a function createWrapper -// which is supplied by the user. createWrapper takes the old process func as -// an input and returns the new wrapper process func. createWrapper should -// use call the old process func within the new process func. -func (c *baseClient) WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) { - c.process = fn(c.defaultProcess) -} - -func (c *baseClient) Process(cmd Cmder) error { - if c.process != nil { - return c.process(cmd) - } - return c.defaultProcess(cmd) -} - -func (c *baseClient) defaultProcess(cmd Cmder) error { - for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } - - cn, _, err := c.getConn() - if err != nil { - cmd.setErr(err) - if internal.IsRetryableError(err, true) { - continue - } - return err - } - - cn.SetWriteTimeout(c.opt.WriteTimeout) - if err := writeCmd(cn, cmd); err != nil { - c.releaseConn(cn, err) - cmd.setErr(err) - if internal.IsRetryableError(err, true) { - continue - } - return err - } - - cn.SetReadTimeout(c.cmdTimeout(cmd)) - err = cmd.readReply(cn) - c.releaseConn(cn, err) - if err != nil && internal.IsRetryableError(err, cmd.readTimeout() == nil) { - continue - } - - return err - } - - return cmd.Err() -} - -func (c *baseClient) retryBackoff(attempt int) time.Duration { - return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) -} - -func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { - if timeout := cmd.readTimeout(); timeout != nil { - return *timeout - } else { - return c.opt.ReadTimeout - } -} - -// Close closes the client, releasing any open resources. 
-// -// It is rare to Close a Client, as the Client is meant to be -// long-lived and shared between many goroutines. -func (c *baseClient) Close() error { - var firstErr error - if c.onClose != nil { - if err := c.onClose(); err != nil && firstErr == nil { - firstErr = err - } - } - if err := c.connPool.Close(); err != nil && firstErr == nil { - firstErr = err - } - return firstErr -} - -func (c *baseClient) getAddr() string { - return c.opt.Addr -} - -type pipelineProcessor func(*pool.Conn, []Cmder) (bool, error) - -func (c *baseClient) pipelineExecer(p pipelineProcessor) pipelineExecer { - return func(cmds []Cmder) error { - for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } - - cn, _, err := c.getConn() - if err != nil { - setCmdsErr(cmds, err) - return err - } - - canRetry, err := p(cn, cmds) - - if err == nil || internal.IsRedisError(err) { - _ = c.connPool.Put(cn) - break - } - _ = c.connPool.Remove(cn) - - if !canRetry || !internal.IsRetryableError(err, true) { - break - } - } - return firstCmdsErr(cmds) - } -} - -func (c *baseClient) pipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) { - cn.SetWriteTimeout(c.opt.WriteTimeout) - if err := writeCmd(cn, cmds...); err != nil { - setCmdsErr(cmds, err) - return true, err - } - - // Set read timeout for all commands. - cn.SetReadTimeout(c.opt.ReadTimeout) - return true, pipelineReadCmds(cn, cmds) -} - -func pipelineReadCmds(cn *pool.Conn, cmds []Cmder) error { - for _, cmd := range cmds { - err := cmd.readReply(cn) - if err != nil && !internal.IsRedisError(err) { - return err - } - } - return nil -} - -func (c *baseClient) txPipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) { - cn.SetWriteTimeout(c.opt.WriteTimeout) - if err := txPipelineWriteMulti(cn, cmds); err != nil { - setCmdsErr(cmds, err) - return true, err - } - - // Set read timeout for all commands. - cn.SetReadTimeout(c.opt.ReadTimeout) - - if err := c.txPipelineReadQueued(cn, cmds); err != nil { - setCmdsErr(cmds, err) - return false, err - } - - return false, pipelineReadCmds(cn, cmds) -} - -func txPipelineWriteMulti(cn *pool.Conn, cmds []Cmder) error { - multiExec := make([]Cmder, 0, len(cmds)+2) - multiExec = append(multiExec, NewStatusCmd("MULTI")) - multiExec = append(multiExec, cmds...) - multiExec = append(multiExec, NewSliceCmd("EXEC")) - return writeCmd(cn, multiExec...) -} - -func (c *baseClient) txPipelineReadQueued(cn *pool.Conn, cmds []Cmder) error { - // Parse queued replies. - var statusCmd StatusCmd - if err := statusCmd.readReply(cn); err != nil { - return err - } - - for _ = range cmds { - err := statusCmd.readReply(cn) - if err != nil && !internal.IsRedisError(err) { - return err - } - } - - // Parse number of replies. - line, err := cn.Rd.ReadLine() - if err != nil { - if err == Nil { - err = TxFailedErr - } - return err - } - - switch line[0] { - case proto.ErrorReply: - return proto.ParseErrorReply(line) - case proto.ArrayReply: - // ok - default: - err := fmt.Errorf("redis: expected '*', but got line %q", line) - return err - } - - return nil -} - -//------------------------------------------------------------------------------ - -// Client is a Redis client representing a pool of zero or more -// underlying connections. It's safe for concurrent use by multiple -// goroutines. 
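// A minimal construction sketch, assuming the v6 Options shown above
// (address and DB are illustrative):
//
//	client := redis.NewClient(&redis.Options{
//		Addr: "localhost:6379",
//		DB:   0,
//	})
//	if err := client.Ping().Err(); err != nil {
//		// handle connection error
//	}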
-type Client struct { - baseClient - cmdable -} - -func newClient(opt *Options, pool pool.Pooler) *Client { - client := Client{ - baseClient: baseClient{ - opt: opt, - connPool: pool, - }, - } - client.setProcessor(client.Process) - return &client -} - -// NewClient returns a client to the Redis Server specified by Options. -func NewClient(opt *Options) *Client { - opt.init() - return newClient(opt, newConnPool(opt)) -} - -func (c *Client) copy() *Client { - c2 := new(Client) - *c2 = *c - c2.setProcessor(c2.Process) - return c2 -} - -// Options returns read-only Options that were used to create the client. -func (c *Client) Options() *Options { - return c.opt -} - -type PoolStats pool.Stats - -// PoolStats returns connection pool stats. -func (c *Client) PoolStats() *PoolStats { - stats := c.connPool.Stats() - return (*PoolStats)(stats) -} - -func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(fn) -} - -func (c *Client) Pipeline() Pipeliner { - pipe := Pipeline{ - exec: c.pipelineExecer(c.pipelineProcessCmds), - } - pipe.setProcessor(pipe.Process) - return &pipe -} - -func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.TxPipeline().Pipelined(fn) -} - -// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. -func (c *Client) TxPipeline() Pipeliner { - pipe := Pipeline{ - exec: c.pipelineExecer(c.txPipelineProcessCmds), - } - pipe.setProcessor(pipe.Process) - return &pipe -} - -func (c *Client) pubSub() *PubSub { - return &PubSub{ - opt: c.opt, - - newConn: func(channels []string) (*pool.Conn, error) { - return c.newConn() - }, - closeConn: c.connPool.CloseConn, - } -} - -// Subscribe subscribes the client to the specified channels. -// Channels can be omitted to create empty subscription. -func (c *Client) Subscribe(channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.Subscribe(channels...) - } - return pubsub -} - -// PSubscribe subscribes the client to the given patterns. -// Patterns can be omitted to create empty subscription. -func (c *Client) PSubscribe(channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.PSubscribe(channels...) - } - return pubsub -} - -//------------------------------------------------------------------------------ - -// Conn is like Client, but its pool contains single connection. -type Conn struct { - baseClient - statefulCmdable -} - -func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(fn) -} - -func (c *Conn) Pipeline() Pipeliner { - pipe := Pipeline{ - exec: c.pipelineExecer(c.pipelineProcessCmds), - } - pipe.setProcessor(pipe.Process) - return &pipe -} - -func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.TxPipeline().Pipelined(fn) -} - -// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. 
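// Queued commands travel in a single MULTI/EXEC round trip; a sketch,
// assuming the v6 API (key name and expiry are illustrative):
//
//	cmds, err := client.TxPipelined(func(pipe redis.Pipeliner) error {
//		pipe.Incr("counter")
//		pipe.Expire("counter", time.Hour)
//		return nil
//	})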
-func (c *Conn) TxPipeline() Pipeliner { - pipe := Pipeline{ - exec: c.pipelineExecer(c.txPipelineProcessCmds), - } - pipe.setProcessor(pipe.Process) - return &pipe -} diff --git a/vendor/github.com/go-redis/redis/redis_context.go b/vendor/github.com/go-redis/redis/redis_context.go deleted file mode 100644 index 6ec811ca5c..0000000000 --- a/vendor/github.com/go-redis/redis/redis_context.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build go1.7 - -package redis - -import ( - "context" - - "github.com/go-redis/redis/internal/pool" -) - -type baseClient struct { - connPool pool.Pooler - opt *Options - - process func(Cmder) error - onClose func() error // hook called when client is closed - - ctx context.Context -} - -func (c *Client) Context() context.Context { - if c.ctx != nil { - return c.ctx - } - return context.Background() -} - -func (c *Client) WithContext(ctx context.Context) *Client { - if ctx == nil { - panic("nil context") - } - c2 := c.copy() - c2.ctx = ctx - return c2 -} diff --git a/vendor/github.com/go-redis/redis/redis_no_context.go b/vendor/github.com/go-redis/redis/redis_no_context.go deleted file mode 100644 index 0752192f15..0000000000 --- a/vendor/github.com/go-redis/redis/redis_no_context.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.7 - -package redis - -import ( - "github.com/go-redis/redis/internal/pool" -) - -type baseClient struct { - connPool pool.Pooler - opt *Options - - process func(Cmder) error - onClose func() error // hook called when client is closed -} diff --git a/vendor/github.com/go-redis/redis/result.go b/vendor/github.com/go-redis/redis/result.go deleted file mode 100644 index 28cea5ca83..0000000000 --- a/vendor/github.com/go-redis/redis/result.go +++ /dev/null @@ -1,140 +0,0 @@ -package redis - -import "time" - -// NewCmdResult returns a Cmd initalised with val and err for testing -func NewCmdResult(val interface{}, err error) *Cmd { - var cmd Cmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewSliceResult returns a SliceCmd initalised with val and err for testing -func NewSliceResult(val []interface{}, err error) *SliceCmd { - var cmd SliceCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewStatusResult returns a StatusCmd initalised with val and err for testing -func NewStatusResult(val string, err error) *StatusCmd { - var cmd StatusCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewIntResult returns an IntCmd initalised with val and err for testing -func NewIntResult(val int64, err error) *IntCmd { - var cmd IntCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewDurationResult returns a DurationCmd initalised with val and err for testing -func NewDurationResult(val time.Duration, err error) *DurationCmd { - var cmd DurationCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewBoolResult returns a BoolCmd initalised with val and err for testing -func NewBoolResult(val bool, err error) *BoolCmd { - var cmd BoolCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewStringResult returns a StringCmd initalised with val and err for testing -func NewStringResult(val string, err error) *StringCmd { - var cmd StringCmd - cmd.val = []byte(val) - cmd.setErr(err) - return &cmd -} - -// NewFloatResult returns a FloatCmd initalised with val and err for testing -func NewFloatResult(val float64, err error) *FloatCmd { - var cmd FloatCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewStringSliceResult returns a StringSliceCmd initalised with val and err for testing -func 
NewStringSliceResult(val []string, err error) *StringSliceCmd { - var cmd StringSliceCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewBoolSliceResult returns a BoolSliceCmd initalised with val and err for testing -func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { - var cmd BoolSliceCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewStringStringMapResult returns a StringStringMapCmd initalised with val and err for testing -func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd { - var cmd StringStringMapCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewStringIntMapCmdResult returns a StringIntMapCmd initalised with val and err for testing -func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd { - var cmd StringIntMapCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewZSliceCmdResult returns a ZSliceCmd initalised with val and err for testing -func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd { - var cmd ZSliceCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewScanCmdResult returns a ScanCmd initalised with val and err for testing -func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd { - var cmd ScanCmd - cmd.page = keys - cmd.cursor = cursor - cmd.setErr(err) - return &cmd -} - -// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initalised with val and err for testing -func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd { - var cmd ClusterSlotsCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} - -// NewGeoLocationCmdResult returns a GeoLocationCmd initalised with val and err for testing -func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd { - var cmd GeoLocationCmd - cmd.locations = val - cmd.setErr(err) - return &cmd -} - -// NewCommandsInfoCmdResult returns a CommandsInfoCmd initalised with val and err for testing -func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd { - var cmd CommandsInfoCmd - cmd.val = val - cmd.setErr(err) - return &cmd -} diff --git a/vendor/github.com/go-redis/redis/ring.go b/vendor/github.com/go-redis/redis/ring.go deleted file mode 100644 index c11ef6bc2f..0000000000 --- a/vendor/github.com/go-redis/redis/ring.go +++ /dev/null @@ -1,510 +0,0 @@ -package redis - -import ( - "errors" - "fmt" - "math/rand" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/go-redis/redis/internal" - "github.com/go-redis/redis/internal/consistenthash" - "github.com/go-redis/redis/internal/hashtag" - "github.com/go-redis/redis/internal/pool" -) - -var errRingShardsDown = errors.New("redis: all ring shards are down") - -// RingOptions are used to configure a ring client and should be -// passed to NewRing. -type RingOptions struct { - // Map of name => host:port addresses of ring shards. - Addrs map[string]string - - // Frequency of PING commands sent to check shards availability. - // Shard is considered down after 3 subsequent failed checks. - HeartbeatFrequency time.Duration - - // Following options are copied from Options struct. 
- - OnConnect func(*Conn) error - - DB int - Password string - - MaxRetries int - MinRetryBackoff time.Duration - MaxRetryBackoff time.Duration - - DialTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - - PoolSize int - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration -} - -func (opt *RingOptions) init() { - if opt.HeartbeatFrequency == 0 { - opt.HeartbeatFrequency = 500 * time.Millisecond - } - - switch opt.MinRetryBackoff { - case -1: - opt.MinRetryBackoff = 0 - case 0: - opt.MinRetryBackoff = 8 * time.Millisecond - } - switch opt.MaxRetryBackoff { - case -1: - opt.MaxRetryBackoff = 0 - case 0: - opt.MaxRetryBackoff = 512 * time.Millisecond - } -} - -func (opt *RingOptions) clientOptions() *Options { - return &Options{ - OnConnect: opt.OnConnect, - - DB: opt.DB, - Password: opt.Password, - - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, - - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: opt.IdleCheckFrequency, - } -} - -type ringShard struct { - Client *Client - down int32 -} - -func (shard *ringShard) String() string { - var state string - if shard.IsUp() { - state = "up" - } else { - state = "down" - } - return fmt.Sprintf("%s is %s", shard.Client, state) -} - -func (shard *ringShard) IsDown() bool { - const threshold = 3 - return atomic.LoadInt32(&shard.down) >= threshold -} - -func (shard *ringShard) IsUp() bool { - return !shard.IsDown() -} - -// Vote votes to set shard state and returns true if state was changed. -func (shard *ringShard) Vote(up bool) bool { - if up { - changed := shard.IsDown() - atomic.StoreInt32(&shard.down, 0) - return changed - } - - if shard.IsDown() { - return false - } - - atomic.AddInt32(&shard.down, 1) - return shard.IsDown() -} - -// Ring is a Redis client that uses constistent hashing to distribute -// keys across multiple Redis servers (shards). It's safe for -// concurrent use by multiple goroutines. -// -// Ring monitors the state of each shard and removes dead shards from -// the ring. When shard comes online it is added back to the ring. This -// gives you maximum availability and partition tolerance, but no -// consistency between different shards or even clients. Each client -// uses shards that are available to the client and does not do any -// coordination when shard state is changed. -// -// Ring should be used when you need multiple Redis servers for caching -// and can tolerate losing data when one of the servers dies. -// Otherwise you should use Redis Cluster. 
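// A minimal ring sketch, assuming the v6 API (shard names and addresses
// are illustrative):
//
//	ring := redis.NewRing(&redis.RingOptions{
//		Addrs: map[string]string{
//			"shard1": "localhost:7000",
//			"shard2": "localhost:7001",
//		},
//	})
//	err := ring.Set("key", "value", 0).Err()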
-type Ring struct { - cmdable - - opt *RingOptions - nreplicas int - - mu sync.RWMutex - hash *consistenthash.Map - shards map[string]*ringShard - shardsList []*ringShard - - cmdsInfoOnce internal.Once - cmdsInfo map[string]*CommandInfo - - closed bool -} - -func NewRing(opt *RingOptions) *Ring { - const nreplicas = 100 - opt.init() - ring := &Ring{ - opt: opt, - nreplicas: nreplicas, - - hash: consistenthash.New(nreplicas, nil), - shards: make(map[string]*ringShard), - } - ring.setProcessor(ring.Process) - for name, addr := range opt.Addrs { - clopt := opt.clientOptions() - clopt.Addr = addr - ring.addShard(name, NewClient(clopt)) - } - go ring.heartbeat() - return ring -} - -func (c *Ring) addShard(name string, cl *Client) { - shard := &ringShard{Client: cl} - c.mu.Lock() - c.hash.Add(name) - c.shards[name] = shard - c.shardsList = append(c.shardsList, shard) - c.mu.Unlock() -} - -// Options returns read-only Options that were used to create the client. -func (c *Ring) Options() *RingOptions { - return c.opt -} - -func (c *Ring) retryBackoff(attempt int) time.Duration { - return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) -} - -// PoolStats returns accumulated connection pool stats. -func (c *Ring) PoolStats() *PoolStats { - c.mu.RLock() - shards := c.shardsList - c.mu.RUnlock() - - var acc PoolStats - for _, shard := range shards { - s := shard.Client.connPool.Stats() - acc.Hits += s.Hits - acc.Misses += s.Misses - acc.Timeouts += s.Timeouts - acc.TotalConns += s.TotalConns - acc.FreeConns += s.FreeConns - } - return &acc -} - -// Subscribe subscribes the client to the specified channels. -func (c *Ring) Subscribe(channels ...string) *PubSub { - if len(channels) == 0 { - panic("at least one channel is required") - } - - shard, err := c.shardByKey(channels[0]) - if err != nil { - // TODO: return PubSub with sticky error - panic(err) - } - return shard.Client.Subscribe(channels...) -} - -// PSubscribe subscribes the client to the given patterns. -func (c *Ring) PSubscribe(channels ...string) *PubSub { - if len(channels) == 0 { - panic("at least one channel is required") - } - - shard, err := c.shardByKey(channels[0]) - if err != nil { - // TODO: return PubSub with sticky error - panic(err) - } - return shard.Client.PSubscribe(channels...) -} - -// ForEachShard concurrently calls the fn on each live shard in the ring. -// It returns the first error if any. 
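// E.g., to ping every live shard (a sketch, assuming the v6 API):
//
//	err := ring.ForEachShard(func(shard *redis.Client) error {
//		return shard.Ping().Err()
//	})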
-func (c *Ring) ForEachShard(fn func(client *Client) error) error { - c.mu.RLock() - shards := c.shardsList - c.mu.RUnlock() - - var wg sync.WaitGroup - errCh := make(chan error, 1) - for _, shard := range shards { - if shard.IsDown() { - continue - } - - wg.Add(1) - go func(shard *ringShard) { - defer wg.Done() - err := fn(shard.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - }(shard) - } - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -func (c *Ring) cmdInfo(name string) *CommandInfo { - err := c.cmdsInfoOnce.Do(func() error { - c.mu.RLock() - shards := c.shardsList - c.mu.RUnlock() - - var firstErr error - for _, shard := range shards { - cmdsInfo, err := shard.Client.Command().Result() - if err == nil { - c.cmdsInfo = cmdsInfo - return nil - } - if firstErr == nil { - firstErr = err - } - } - return firstErr - }) - if err != nil { - return nil - } - if c.cmdsInfo == nil { - return nil - } - info := c.cmdsInfo[name] - if info == nil { - internal.Logf("info for cmd=%s not found", name) - } - return info -} - -func (c *Ring) shardByKey(key string) (*ringShard, error) { - key = hashtag.Key(key) - - c.mu.RLock() - - if c.closed { - c.mu.RUnlock() - return nil, pool.ErrClosed - } - - name := c.hash.Get(key) - if name == "" { - c.mu.RUnlock() - return nil, errRingShardsDown - } - - shard := c.shards[name] - c.mu.RUnlock() - return shard, nil -} - -func (c *Ring) randomShard() (*ringShard, error) { - return c.shardByKey(strconv.Itoa(rand.Int())) -} - -func (c *Ring) shardByName(name string) (*ringShard, error) { - if name == "" { - return c.randomShard() - } - - c.mu.RLock() - shard := c.shards[name] - c.mu.RUnlock() - return shard, nil -} - -func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) { - cmdInfo := c.cmdInfo(cmd.Name()) - pos := cmdFirstKeyPos(cmd, cmdInfo) - if pos == 0 { - return c.randomShard() - } - firstKey := cmd.stringArg(pos) - return c.shardByKey(firstKey) -} - -func (c *Ring) Process(cmd Cmder) error { - shard, err := c.cmdShard(cmd) - if err != nil { - cmd.setErr(err) - return err - } - return shard.Client.Process(cmd) -} - -// rebalance removes dead shards from the Ring. -func (c *Ring) rebalance() { - hash := consistenthash.New(c.nreplicas, nil) - for name, shard := range c.shards { - if shard.IsUp() { - hash.Add(name) - } - } - - c.mu.Lock() - c.hash = hash - c.mu.Unlock() -} - -// heartbeat monitors state of each shard in the ring. -func (c *Ring) heartbeat() { - ticker := time.NewTicker(c.opt.HeartbeatFrequency) - defer ticker.Stop() - for range ticker.C { - var rebalance bool - - c.mu.RLock() - - if c.closed { - c.mu.RUnlock() - break - } - - shards := c.shardsList - c.mu.RUnlock() - - for _, shard := range shards { - err := shard.Client.Ping().Err() - if shard.Vote(err == nil || err == pool.ErrPoolTimeout) { - internal.Logf("ring shard state changed: %s", shard) - rebalance = true - } - } - - if rebalance { - c.rebalance() - } - } -} - -// Close closes the ring client, releasing any open resources. -// -// It is rare to Close a Ring, as the Ring is meant to be long-lived -// and shared between many goroutines. 
-func (c *Ring) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil - } - c.closed = true - - var firstErr error - for _, shard := range c.shards { - if err := shard.Client.Close(); err != nil && firstErr == nil { - firstErr = err - } - } - c.hash = nil - c.shards = nil - c.shardsList = nil - - return firstErr -} - -func (c *Ring) Pipeline() Pipeliner { - pipe := Pipeline{ - exec: c.pipelineExec, - } - pipe.setProcessor(pipe.Process) - return &pipe -} - -func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(fn) -} - -func (c *Ring) pipelineExec(cmds []Cmder) error { - cmdsMap := make(map[string][]Cmder) - for _, cmd := range cmds { - cmdInfo := c.cmdInfo(cmd.Name()) - name := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo)) - if name != "" { - name = c.hash.Get(hashtag.Key(name)) - } - cmdsMap[name] = append(cmdsMap[name], cmd) - } - - for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { - if attempt > 0 { - time.Sleep(c.retryBackoff(attempt)) - } - - var failedCmdsMap map[string][]Cmder - - for name, cmds := range cmdsMap { - shard, err := c.shardByName(name) - if err != nil { - setCmdsErr(cmds, err) - continue - } - - cn, _, err := shard.Client.getConn() - if err != nil { - setCmdsErr(cmds, err) - continue - } - - canRetry, err := shard.Client.pipelineProcessCmds(cn, cmds) - if err == nil || internal.IsRedisError(err) { - _ = shard.Client.connPool.Put(cn) - continue - } - _ = shard.Client.connPool.Remove(cn) - - if canRetry && internal.IsRetryableError(err, true) { - if failedCmdsMap == nil { - failedCmdsMap = make(map[string][]Cmder) - } - failedCmdsMap[name] = cmds - } - } - - if len(failedCmdsMap) == 0 { - break - } - cmdsMap = failedCmdsMap - } - - return firstCmdsErr(cmds) -} - -func (c *Ring) TxPipeline() Pipeliner { - panic("not implemented") -} - -func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { - panic("not implemented") -} diff --git a/vendor/github.com/go-redis/redis/script.go b/vendor/github.com/go-redis/redis/script.go deleted file mode 100644 index 74135f5a5c..0000000000 --- a/vendor/github.com/go-redis/redis/script.go +++ /dev/null @@ -1,62 +0,0 @@ -package redis - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "strings" -) - -type scripter interface { - Eval(script string, keys []string, args ...interface{}) *Cmd - EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd - ScriptExists(scripts ...string) *BoolSliceCmd - ScriptLoad(script string) *StringCmd -} - -var _ scripter = (*Client)(nil) -var _ scripter = (*Ring)(nil) -var _ scripter = (*ClusterClient)(nil) - -type Script struct { - src, hash string -} - -func NewScript(src string) *Script { - h := sha1.New() - io.WriteString(h, src) - return &Script{ - src: src, - hash: hex.EncodeToString(h.Sum(nil)), - } -} - -func (s *Script) Hash() string { - return s.hash -} - -func (s *Script) Load(c scripter) *StringCmd { - return c.ScriptLoad(s.src) -} - -func (s *Script) Exists(c scripter) *BoolSliceCmd { - return c.ScriptExists(s.src) -} - -func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd { - return c.Eval(s.src, keys, args...) -} - -func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd { - return c.EvalSha(s.hash, keys, args...) -} - -// Run optimistically uses EVALSHA to run the script. If script does not exist -// it is retried using EVAL. -func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd { - r := s.EvalSha(c, keys, args...) 
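// (The fallback below relies on Redis prefixing the error for an unknown
// script hash with "NOSCRIPT "; only in that case is the full source
// re-sent via EVAL, which also loads it into the server's script cache.)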
- if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { - return s.Eval(c, keys, args...) - } - return r -} diff --git a/vendor/github.com/go-redis/redis/sentinel.go b/vendor/github.com/go-redis/redis/sentinel.go deleted file mode 100644 index 37d06b4821..0000000000 --- a/vendor/github.com/go-redis/redis/sentinel.go +++ /dev/null @@ -1,337 +0,0 @@ -package redis - -import ( - "errors" - "net" - "strings" - "sync" - "time" - - "github.com/go-redis/redis/internal" - "github.com/go-redis/redis/internal/pool" -) - -//------------------------------------------------------------------------------ - -// FailoverOptions are used to configure a failover client and should -// be passed to NewFailoverClient. -type FailoverOptions struct { - // The master name. - MasterName string - // A seed list of host:port addresses of sentinel nodes. - SentinelAddrs []string - - // Following options are copied from Options struct. - - OnConnect func(*Conn) error - - Password string - DB int - - MaxRetries int - - DialTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - - PoolSize int - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration -} - -func (opt *FailoverOptions) options() *Options { - return &Options{ - Addr: "FailoverClient", - - OnConnect: opt.OnConnect, - - DB: opt.DB, - Password: opt.Password, - - MaxRetries: opt.MaxRetries, - - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, - - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: opt.IdleCheckFrequency, - } -} - -// NewFailoverClient returns a Redis client that uses Redis Sentinel -// for automatic failover. It's safe for concurrent use by multiple -// goroutines. 
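Run's optimistic EVALSHA with an EVAL fallback rests on two small details visible in the deleted script.go: the script digest that NewScript precomputes, and the "NOSCRIPT " prefix Redis puts on the error when that digest is not cached on the server. A stdlib-only sketch of both pieces:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// scriptHash computes the digest EVALSHA expects for a Lua script,
// the same SHA-1 hex encoding the deleted NewScript produces.
func scriptHash(src string) string {
	h := sha1.New()
	io.WriteString(h, src)
	return hex.EncodeToString(h.Sum(nil))
}

// isNoScript reports whether an error means the script is not cached
// server-side, the condition Run uses to fall back from EVALSHA to EVAL.
func isNoScript(err error) bool {
	return err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ")
}

func main() {
	fmt.Println(scriptHash(`return redis.call("GET", KEYS[1])`))
	fmt.Println(isNoScript(fmt.Errorf("NOSCRIPT No matching script")))
}
```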
-func NewFailoverClient(failoverOpt *FailoverOptions) *Client { - opt := failoverOpt.options() - opt.init() - - failover := &sentinelFailover{ - masterName: failoverOpt.MasterName, - sentinelAddrs: failoverOpt.SentinelAddrs, - - opt: opt, - } - - client := Client{ - baseClient: baseClient{ - opt: opt, - connPool: failover.Pool(), - - onClose: func() error { - return failover.Close() - }, - }, - } - client.setProcessor(client.Process) - - return &client -} - -//------------------------------------------------------------------------------ - -type sentinelClient struct { - cmdable - baseClient -} - -func newSentinel(opt *Options) *sentinelClient { - opt.init() - client := sentinelClient{ - baseClient: baseClient{ - opt: opt, - connPool: newConnPool(opt), - }, - } - client.cmdable = cmdable{client.Process} - return &client -} - -func (c *sentinelClient) PubSub() *PubSub { - return &PubSub{ - opt: c.opt, - - newConn: func(channels []string) (*pool.Conn, error) { - return c.newConn() - }, - closeConn: c.connPool.CloseConn, - } -} - -func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd { - cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name) - c.Process(cmd) - return cmd -} - -func (c *sentinelClient) Sentinels(name string) *SliceCmd { - cmd := NewSliceCmd("SENTINEL", "sentinels", name) - c.Process(cmd) - return cmd -} - -type sentinelFailover struct { - sentinelAddrs []string - - opt *Options - - pool *pool.ConnPool - poolOnce sync.Once - - mu sync.RWMutex - masterName string - _masterAddr string - sentinel *sentinelClient -} - -func (d *sentinelFailover) Close() error { - return d.resetSentinel() -} - -func (d *sentinelFailover) Pool() *pool.ConnPool { - d.poolOnce.Do(func() { - d.opt.Dialer = d.dial - d.pool = newConnPool(d.opt) - }) - return d.pool -} - -func (d *sentinelFailover) dial() (net.Conn, error) { - addr, err := d.MasterAddr() - if err != nil { - return nil, err - } - return net.DialTimeout("tcp", addr, d.opt.DialTimeout) -} - -func (d *sentinelFailover) MasterAddr() (string, error) { - d.mu.Lock() - defer d.mu.Unlock() - - addr, err := d.masterAddr() - if err != nil { - return "", err - } - - if d._masterAddr != addr { - d.switchMaster(addr) - } - - return addr, nil -} - -func (d *sentinelFailover) masterAddr() (string, error) { - // Try last working sentinel. - if d.sentinel != nil { - addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result() - if err == nil { - addr := net.JoinHostPort(addr[0], addr[1]) - internal.Logf("sentinel: master=%q addr=%q", d.masterName, addr) - return addr, nil - } - - internal.Logf("sentinel: GetMasterAddrByName name=%q failed: %s", d.masterName, err) - d._resetSentinel() - } - - for i, sentinelAddr := range d.sentinelAddrs { - sentinel := newSentinel(&Options{ - Addr: sentinelAddr, - - DialTimeout: d.opt.DialTimeout, - ReadTimeout: d.opt.ReadTimeout, - WriteTimeout: d.opt.WriteTimeout, - - PoolSize: d.opt.PoolSize, - PoolTimeout: d.opt.PoolTimeout, - IdleTimeout: d.opt.IdleTimeout, - }) - - masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result() - if err != nil { - internal.Logf("sentinel: GetMasterAddrByName master=%q failed: %s", d.masterName, err) - sentinel.Close() - continue - } - - // Push working sentinel to the top. 
- d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0] - d.setSentinel(sentinel) - - addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) - return addr, nil - } - - return "", errors.New("redis: all sentinels are unreachable") -} - -func (d *sentinelFailover) switchMaster(masterAddr string) { - internal.Logf( - "sentinel: new master=%q addr=%q", - d.masterName, masterAddr, - ) - _ = d.Pool().Filter(func(cn *pool.Conn) bool { - return cn.RemoteAddr().String() != masterAddr - }) - d._masterAddr = masterAddr -} - -func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) { - d.discoverSentinels(sentinel) - d.sentinel = sentinel - go d.listen(sentinel) -} - -func (d *sentinelFailover) resetSentinel() error { - var err error - d.mu.Lock() - if d.sentinel != nil { - err = d._resetSentinel() - } - d.mu.Unlock() - return err -} - -func (d *sentinelFailover) _resetSentinel() error { - err := d.sentinel.Close() - d.sentinel = nil - return err -} - -func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) { - sentinels, err := sentinel.Sentinels(d.masterName).Result() - if err != nil { - internal.Logf("sentinel: Sentinels master=%q failed: %s", d.masterName, err) - return - } - for _, sentinel := range sentinels { - vals := sentinel.([]interface{}) - for i := 0; i < len(vals); i += 2 { - key := vals[i].(string) - if key == "name" { - sentinelAddr := vals[i+1].(string) - if !contains(d.sentinelAddrs, sentinelAddr) { - internal.Logf( - "sentinel: discovered new sentinel=%q for master=%q", - sentinelAddr, d.masterName, - ) - d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr) - } - } - } - } -} - -func (d *sentinelFailover) listen(sentinel *sentinelClient) { - var pubsub *PubSub - for { - if pubsub == nil { - pubsub = sentinel.PubSub() - - if err := pubsub.Subscribe("+switch-master"); err != nil { - internal.Logf("sentinel: Subscribe failed: %s", err) - pubsub.Close() - d.resetSentinel() - return - } - } - - msg, err := pubsub.ReceiveMessage() - if err != nil { - if err != pool.ErrClosed { - internal.Logf("sentinel: ReceiveMessage failed: %s", err) - pubsub.Close() - } - d.resetSentinel() - return - } - - switch msg.Channel { - case "+switch-master": - parts := strings.Split(msg.Payload, " ") - if parts[0] != d.masterName { - internal.Logf("sentinel: ignore addr for master=%q", parts[0]) - continue - } - addr := net.JoinHostPort(parts[3], parts[4]) - - d.mu.Lock() - if d._masterAddr != addr { - d.switchMaster(addr) - } - d.mu.Unlock() - } - } -} - -func contains(slice []string, str string) bool { - for _, s := range slice { - if s == str { - return true - } - } - return false -} diff --git a/vendor/github.com/go-redis/redis/tx.go b/vendor/github.com/go-redis/redis/tx.go deleted file mode 100644 index 11d5d5cb00..0000000000 --- a/vendor/github.com/go-redis/redis/tx.go +++ /dev/null @@ -1,103 +0,0 @@ -package redis - -import ( - "github.com/go-redis/redis/internal" - "github.com/go-redis/redis/internal/pool" -) - -// Redis transaction failed. -const TxFailedErr = internal.RedisError("redis: transaction failed") - -// Tx implements Redis transactions as described in -// http://redis.io/topics/transactions. It's NOT safe for concurrent use -// by multiple goroutines, because Exec resets list of watched keys. -// If you don't need WATCH it is better to use Pipeline. 
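The listen loop above recovers the new master from a Sentinel "+switch-master" message by position: field 0 is the master name and fields 3 and 4 are the new host and port. A minimal parser for that payload; the length check is an added safeguard, not in the original:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"strings"
)

// parseSwitchMaster extracts the new master address from a Sentinel
// "+switch-master" payload of the form
// "<master> <old-ip> <old-port> <new-ip> <new-port>".
func parseSwitchMaster(masterName, payload string) (string, error) {
	parts := strings.Split(payload, " ")
	if len(parts) < 5 {
		return "", errors.New("malformed +switch-master payload")
	}
	if parts[0] != masterName {
		return "", fmt.Errorf("message is for master %q, not %q", parts[0], masterName)
	}
	return net.JoinHostPort(parts[3], parts[4]), nil
}

func main() {
	addr, err := parseSwitchMaster("mymaster",
		"mymaster 10.0.0.1 6379 10.0.0.2 6379")
	fmt.Println(addr, err) // 10.0.0.2:6379 <nil>
}
```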
-type Tx struct { - statefulCmdable - baseClient -} - -func (c *Client) newTx() *Tx { - tx := Tx{ - baseClient: baseClient{ - opt: c.opt, - connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true), - }, - } - tx.setProcessor(tx.Process) - return &tx -} - -func (c *Client) Watch(fn func(*Tx) error, keys ...string) error { - tx := c.newTx() - if len(keys) > 0 { - if err := tx.Watch(keys...).Err(); err != nil { - _ = tx.Close() - return err - } - } - - err := fn(tx) - _ = tx.Close() - return err -} - -// close closes the transaction, releasing any open resources. -func (c *Tx) Close() error { - _ = c.Unwatch().Err() - return c.baseClient.Close() -} - -// Watch marks the keys to be watched for conditional execution -// of a transaction. -func (c *Tx) Watch(keys ...string) *StatusCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "watch" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStatusCmd(args...) - c.Process(cmd) - return cmd -} - -// Unwatch flushes all the previously watched keys for a transaction. -func (c *Tx) Unwatch(keys ...string) *StatusCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "unwatch" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStatusCmd(args...) - c.Process(cmd) - return cmd -} - -func (c *Tx) Pipeline() Pipeliner { - pipe := Pipeline{ - exec: c.pipelineExecer(c.txPipelineProcessCmds), - } - pipe.setProcessor(pipe.Process) - return &pipe -} - -// Pipelined executes commands queued in the fn in a transaction -// and restores the connection state to normal. -// -// When using WATCH, EXEC will execute commands only if the watched keys -// were not modified, allowing for a check-and-set mechanism. -// -// Exec always returns list of commands. If transaction fails -// TxFailedErr is returned. Otherwise Exec returns error of the first -// failed command or nil. -func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(fn) -} - -func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipelined(fn) -} - -func (c *Tx) TxPipeline() Pipeliner { - return c.Pipeline() -} diff --git a/vendor/github.com/go-redis/redis/universal.go b/vendor/github.com/go-redis/redis/universal.go deleted file mode 100644 index ea42f69847..0000000000 --- a/vendor/github.com/go-redis/redis/universal.go +++ /dev/null @@ -1,138 +0,0 @@ -package redis - -import "time" - -// UniversalOptions information is required by UniversalClient to establish -// connections. -type UniversalOptions struct { - // Either a single address or a seed list of host:port addresses - // of cluster/sentinel nodes. - Addrs []string - - // The sentinel master name. - // Only failover clients. - MasterName string - - // Database to be selected after connecting to the server. - // Only single-node and failover clients. - DB int - - // Only cluster clients. - - // Enables read only queries on slave nodes. 
- ReadOnly bool - - MaxRedirects int - RouteByLatency bool - - // Common options - - MaxRetries int - Password string - DialTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - PoolSize int - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration -} - -func (o *UniversalOptions) cluster() *ClusterOptions { - if len(o.Addrs) == 0 { - o.Addrs = []string{"127.0.0.1:6379"} - } - - return &ClusterOptions{ - Addrs: o.Addrs, - MaxRedirects: o.MaxRedirects, - RouteByLatency: o.RouteByLatency, - ReadOnly: o.ReadOnly, - - MaxRetries: o.MaxRetries, - Password: o.Password, - DialTimeout: o.DialTimeout, - ReadTimeout: o.ReadTimeout, - WriteTimeout: o.WriteTimeout, - PoolSize: o.PoolSize, - PoolTimeout: o.PoolTimeout, - IdleTimeout: o.IdleTimeout, - IdleCheckFrequency: o.IdleCheckFrequency, - } -} - -func (o *UniversalOptions) failover() *FailoverOptions { - if len(o.Addrs) == 0 { - o.Addrs = []string{"127.0.0.1:26379"} - } - - return &FailoverOptions{ - SentinelAddrs: o.Addrs, - MasterName: o.MasterName, - DB: o.DB, - - MaxRetries: o.MaxRetries, - Password: o.Password, - DialTimeout: o.DialTimeout, - ReadTimeout: o.ReadTimeout, - WriteTimeout: o.WriteTimeout, - PoolSize: o.PoolSize, - PoolTimeout: o.PoolTimeout, - IdleTimeout: o.IdleTimeout, - IdleCheckFrequency: o.IdleCheckFrequency, - } -} - -func (o *UniversalOptions) simple() *Options { - addr := "127.0.0.1:6379" - if len(o.Addrs) > 0 { - addr = o.Addrs[0] - } - - return &Options{ - Addr: addr, - DB: o.DB, - - MaxRetries: o.MaxRetries, - Password: o.Password, - DialTimeout: o.DialTimeout, - ReadTimeout: o.ReadTimeout, - WriteTimeout: o.WriteTimeout, - PoolSize: o.PoolSize, - PoolTimeout: o.PoolTimeout, - IdleTimeout: o.IdleTimeout, - IdleCheckFrequency: o.IdleCheckFrequency, - } -} - -// -------------------------------------------------------------------- - -// UniversalClient is an abstract client which - based on the provided options - -// can connect to either clusters, or sentinel-backed failover instances or simple -// single-instance servers. This can be useful for testing cluster-specific -// applications locally. -type UniversalClient interface { - Cmdable - Process(cmd Cmder) error - Subscribe(channels ...string) *PubSub - PSubscribe(channels ...string) *PubSub - Close() error -} - -var _ UniversalClient = (*Client)(nil) -var _ UniversalClient = (*ClusterClient)(nil) - -// NewUniversalClient returns a new multi client. The type of client returned depends -// on the following three conditions: -// -// 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned -// 2. if the number of Addrs is two or more, a ClusterClient will be returned -// 3. otherwise, a single-node redis Client will be returned. 
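Those three dispatch rules are easiest to see from the caller's side. A usage sketch with illustrative addresses; each call returns a different concrete client behind the same UniversalClient interface:

```go
package main

import "github.com/go-redis/redis"

func main() {
	// MasterName set: a sentinel-backed failover client is returned.
	failover := redis.NewUniversalClient(&redis.UniversalOptions{
		MasterName: "mymaster",
		Addrs:      []string{":26379"},
	})
	defer failover.Close()

	// Two or more addresses and no MasterName: a cluster client.
	cluster := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{":7000", ":7001", ":7002"},
	})
	defer cluster.Close()

	// A single address: a plain single-node client.
	simple := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{":6379"},
	})
	defer simple.Close()
}
```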
-func NewUniversalClient(opts *UniversalOptions) UniversalClient { - if opts.MasterName != "" { - return NewFailoverClient(opts.failover()) - } else if len(opts.Addrs) > 1 { - return NewClusterClient(opts.cluster()) - } - return NewClient(opts.simple()) -} diff --git a/vendor/github.com/gocql/gocql/.gitignore b/vendor/github.com/gocql/gocql/.gitignore deleted file mode 100644 index bce6cf584a..0000000000 --- a/vendor/github.com/gocql/gocql/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -gocql-fuzz -fuzz-corpus -fuzz-work -gocql.test -.idea diff --git a/vendor/github.com/gocql/gocql/.travis.yml b/vendor/github.com/gocql/gocql/.travis.yml deleted file mode 100644 index 9845342fe4..0000000000 --- a/vendor/github.com/gocql/gocql/.travis.yml +++ /dev/null @@ -1,45 +0,0 @@ -language: go - -sudo: required -dist: trusty - -cache: - directories: - - $HOME/.ccm/repository - - $HOME/.local/lib/python2.7 - -matrix: - fast_finish: true - -env: - global: - - GOMAXPROCS=2 - matrix: - - CASS=2.1.12 - AUTH=false - - CASS=2.2.5 - AUTH=true - - CASS=2.2.5 - AUTH=false - - CASS=3.0.8 - AUTH=false - -go: - - 1.8 - - 1.9 - -install: - - pip install --user cql PyYAML six - - git clone https://github.com/pcmanus/ccm.git - - pushd ccm - - ./setup.py install --user - - popd - - go get . - -script: - - set -e - - PATH=$PATH:$HOME/.local/bin bash integration.sh $CASS $AUTH - - go vet . - -notifications: - - email: false diff --git a/vendor/github.com/gocql/gocql/AUTHORS b/vendor/github.com/gocql/gocql/AUTHORS deleted file mode 100644 index a1ea3c2cbe..0000000000 --- a/vendor/github.com/gocql/gocql/AUTHORS +++ /dev/null @@ -1,103 +0,0 @@ -# This source file refers to The gocql Authors for copyright purposes. - -Christoph Hack -Jonathan Rudenberg -Thorsten von Eicken -Matt Robenolt -Phillip Couto -Niklas Korz -Nimi Wariboko Jr -Ghais Issa -Sasha Klizhentas -Konstantin Cherkasov -Ben Hood <0x6e6562@gmail.com> -Pete Hopkins -Chris Bannister -Maxim Bublis -Alex Zorin -Kasper Middelboe Petersen -Harpreet Sawhney -Charlie Andrews -Stanislavs Koikovs -Dan Forest -Miguel Serrano -Stefan Radomski -Josh Wright -Jacob Rhoden -Ben Frye -Fred McCann -Dan Simmons -Muir Manders -Sankar P -Julien Da Silva -Dan Kennedy -Nick Dhupia -Yasuharu Goto -Jeremy Schlatter -Matthias Kadenbach -Dean Elbaz -Mike Berman -Dmitriy Fedorenko -Zach Marcantel -James Maloney -Ashwin Purohit -Dan Kinder -Oliver Beattie -Justin Corpron -Miles Delahunty -Zach Badgett -Maciek Sakrejda -Jeff Mitchell -Baptiste Fontaine -Matt Heath -Jamie Cuthill -Adrian Casajus -John Weldon -Adrien Bustany -Andrey Smirnov -Adam Weiner -Daniel Cannon -Johnny Bergström -Adriano Orioli -Claudiu Raveica -Artem Chernyshev -Ference Fu -LOVOO -nikandfor -Anthony Woods -Alexander Inozemtsev -Rob McColl ; -Viktor Tönköl -Ian Lozinski -Michael Highstead -Sarah Brown -Caleb Doxsey -Frederic Hemery -Pekka Enberg -Mark M -Bartosz Burclaf -Marcus King -Andrew de Andrade -Robert Nix -Nathan Youngman -Charles Law ; -Nathan Davies -Bo Blanton -Vincent Rischmann -Jesse Claven -Derrick Wippler -Leigh McCulloch -Ron Kuris -Raphael Gavache -Yasser Abdolmaleki -Krishnanand Thommandra -Blake Atkinson -Dharmendra Parsaila -Nayef Ghattas -Michał Matczuk -Ben Krebsbach -Vivian Mathews -Sascha Steinbiss -Seth Rosenblum -Javier Zunzunegui -Luke Hines \ No newline at end of file diff --git a/vendor/github.com/gocql/gocql/CONTRIBUTING.md b/vendor/github.com/gocql/gocql/CONTRIBUTING.md deleted file mode 100644 index 093045a31d..0000000000 --- a/vendor/github.com/gocql/gocql/CONTRIBUTING.md +++ /dev/null @@ 
-1,78 +0,0 @@ -# Contributing to gocql - -**TL;DR** - this manifesto sets out the bare minimum requirements for submitting a patch to gocql. - -This guide outlines the process of landing patches in gocql and the general approach to maintaining the code base. - -## Background - -The goal of the gocql project is to provide a stable and robust CQL driver for Go. gocql is a community driven project that is coordinated by a small team of core developers. - -## Minimum Requirement Checklist - -The following is a check list of requirements that need to be satisfied in order for us to merge your patch: - -* You should raise a pull request to gocql/gocql on Github -* The pull request has a title that clearly summarizes the purpose of the patch -* The motivation behind the patch is clearly defined in the pull request summary -* Your name and email have been added to the `AUTHORS` file (for copyright purposes) -* The patch will merge cleanly -* The test coverage does not fall below the critical threshold (currently 64%) -* The merge commit passes the regression test suite on Travis -* `go fmt` has been applied to the submitted code -* Functional changes (i.e. new features or changed behavior) are appropriately documented, either as a godoc or in the README (non-functional changes such as bug fixes may not require documentation) - -If there are any requirements that can't be reasonably satisfied, please state this either on the pull request or as part of discussion on the mailing list. Where appropriate, the core team may apply discretion and make an exception to these requirements. - -## Beyond The Checklist - -In addition to stating the hard requirements, there are a bunch of things that we consider when assessing changes to the library. These soft requirements are helpful pointers of how to get a patch landed quicker and with less fuss. - -### General QA Approach - -The gocql team needs to consider the ongoing maintainability of the library at all times. Patches that look like they will introduce maintenance issues for the team will not be accepted. - -Your patch will get merged quicker if you have decent test cases that provide test coverage for the new behavior you wish to introduce. - -Unit tests are good, integration tests are even better. An example of a unit test is `marshal_test.go` - this tests the serialization code in isolation. `cassandra_test.go` is an integration test suite that is executed against every version of Cassandra that gocql supports as part of the CI process on Travis. - -That said, the point of writing tests is to provide a safety net to catch regressions, so there is no need to go overboard with tests. Remember that the more tests you write, the more code we will have to maintain. So there's a balance to strike there. - -### When It's Too Difficult To Automate Testing - -There are legitimate examples of where it is infeasible to write a regression test for a change. Never fear, we will still consider the patch and quite possibly accept the change without a test. The gocql team takes a pragmatic approach to testing. At the end of the day, you could be addressing an issue that is too difficult to reproduce in a test suite, but still occurs in a real production app. In this case, your production app is the test case, and we will have to trust that your change is good. 
- -Examples of pull requests that have been accepted without tests include: - -* https://github.com/gocql/gocql/pull/181 - this patch would otherwise require a multi-node cluster to be booted as part of the CI build -* https://github.com/gocql/gocql/pull/179 - this bug can only be reproduced under heavy load in certain circumstances - -### Sign Off Procedure - -Generally speaking, a pull request can get merged by any one of the core gocql team. If your change is minor, chances are that one team member will just go ahead and merge it there and then. As stated earlier, suitable test coverage will increase the likelihood that a single reviewer will assess and merge your change. If your change has no test coverage, or looks like it may have wider implications for the health and stability of the library, the reviewer may elect to refer the change to another team member to achieve consensus before proceeding. Therefore, the tighter and cleaner your patch is, the quicker it will go through the review process. - -### Supported Features - -gocql is a low level wire driver for Cassandra CQL. By and large, we would like to keep the functional scope of the library as narrow as possible. We think that gocql should be tight and focused, and we will be naturally skeptical of things that could just as easily be implemented in a higher layer. Inevitably you will come across something that could be implemented in a higher layer, save for a minor change to the core API. In this instance, please strike up a conversation with the gocql team. Chances are we will understand what you are trying to achieve and will try to accommodate this in a maintainable way. - -### Longer Term Evolution - -There are some long term plans for gocql that have to be taken into account when assessing changes. That said, gocql is ultimately a community driven project and we don't have a massive development budget, so sometimes the long term view might need to be de-prioritized ahead of short term changes. - -## Officially Supported Server Versions - -Currently, the officially supported versions of the Cassandra server include: - -* 1.2.18 -* 2.0.9 - -Chances are that gocql will work with many other versions. If you would like us to support a particular version of Cassandra, please start a conversation about what version you'd like us to consider. We are more likely to accept a new version if you help out by extending the regression suite to cover the new version to be supported. - -## The Core Dev Team - -The core development team includes: - -* tux21b -* phillipCouto -* Zariel -* 0x6e6562 diff --git a/vendor/github.com/gocql/gocql/LICENSE b/vendor/github.com/gocql/gocql/LICENSE deleted file mode 100644 index 3836494a93..0000000000 --- a/vendor/github.com/gocql/gocql/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2016, The Gocql authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gocql/gocql/README.md b/vendor/github.com/gocql/gocql/README.md deleted file mode 100644 index bc07eeca1a..0000000000 --- a/vendor/github.com/gocql/gocql/README.md +++ /dev/null @@ -1,214 +0,0 @@ -gocql -===== - -[![Join the chat at https://gitter.im/gocql/gocql](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gocql/gocql?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/gocql/gocql.svg?branch=master)](https://travis-ci.org/gocql/gocql) -[![GoDoc](https://godoc.org/github.com/gocql/gocql?status.svg)](https://godoc.org/github.com/gocql/gocql) - -Package gocql implements a fast and robust Cassandra client for the -Go programming language. - -Project Website: https://gocql.github.io/
-API documentation: https://godoc.org/github.com/gocql/gocql
-Discussions: https://groups.google.com/forum/#!forum/gocql
-
-Supported Versions
-------------------
-
-The following matrix shows the versions of Go and Cassandra that are tested with the integration test suite as part of the CI build:
-
-Go/Cassandra | 2.1.x | 2.2.x | 3.0.x
--------------| ------| ------| ------
-1.8          | yes   | yes   | yes
-1.9          | yes   | yes   | yes
-
-Gocql has been tested in production against many different versions of Cassandra. Due to limits in our CI setup we only test against the latest 3 major releases, which coincide with the official support from the Apache project.
-
-Sunsetting Model
-----------------
-
-In general, the gocql team will focus on supporting the current and previous versions of Go. gocql may still work with older versions of Go, but official support for these versions will have been sunset.
-
-Installation
-------------
-
-    go get github.com/gocql/gocql
-
-
-Features
---------
-
-* Modern Cassandra client using the native transport
-* Automatic type conversions between Cassandra and Go
-  * Support for all common types including sets, lists and maps
-  * Custom types can implement a `Marshaler` and `Unmarshaler` interface
-  * Strict type conversions without any loss of precision
-  * Built-in support for UUIDs (version 1 and 4)
-* Support for logged, unlogged and counter batches
-* Cluster management
-  * Automatic reconnect on connection failures with exponential falloff
-  * Round robin distribution of queries to different hosts
-  * Round robin distribution of queries to different connections on a host
-  * Each connection can execute up to n concurrent queries (whereby n is the limit set by the protocol version the client chooses to use)
-  * Optional automatic discovery of nodes
-  * Policy based connection pool with token aware and round-robin policy implementations
-* Support for password authentication
-* Iteration over paged results with configurable page size
-* Support for TLS/SSL
-* Optional frame compression (using snappy)
-* Automatic query preparation
-* Support for query tracing
-* Support for Cassandra 2.1+ [binary protocol version 3](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v3.spec)
-  * Support for up to 32768 streams
-  * Support for tuple types
-  * Support for client side timestamps by default
-  * Support for UDTs via a custom marshaller or struct tags
-* Support for Cassandra 3.0+ [binary protocol version 4](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec)
-* An API to access the schema metadata of a given keyspace
-
-Performance
------------
-While the driver strives to be highly performant, there are cases where it is difficult to test and verify. The driver is built
-with maintainability and code readability in mind first and then performance and features; as such, performance may degrade
-from time to time. If this occurs, please report an issue and it will be looked at and remedied. The only time the driver
-copies data from its read buffer is when it unmarshals data into supplied types.
- -Some tips for getting more performance from the driver: -* Use the TokenAware policy -* Use many goroutines when doing inserts, the driver is asynchronous but provides a synchronous API, it can execute many queries concurrently -* Tune query page size -* Reading data from the network to unmarshal will incur a large amount of allocations, this can adversely affect the garbage collector, tune `GOGC` -* Close iterators after use to recycle byte buffers - -Important Default Keyspace Changes ----------------------------------- -gocql no longer supports executing "use " statements to simplify the library. The user still has the -ability to define the default keyspace for connections but now the keyspace can only be defined before a -session is created. Queries can still access keyspaces by indicating the keyspace in the query: -```sql -SELECT * FROM example2.table; -``` - -Example of correct usage: -```go - cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3") - cluster.Keyspace = "example" - ... - session, err := cluster.CreateSession() - -``` -Example of incorrect usage: -```go - cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3") - cluster.Keyspace = "example" - ... - session, err := cluster.CreateSession() - - if err = session.Query("use example2").Exec(); err != nil { - log.Fatal(err) - } -``` -This will result in an err being returned from the session.Query line as the user is trying to execute a "use" -statement. - -Example -------- - -```go -/* Before you execute the program, Launch `cqlsh` and execute: -create keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }; -create table example.tweet(timeline text, id UUID, text text, PRIMARY KEY(id)); -create index on example.tweet(timeline); -*/ -package main - -import ( - "fmt" - "log" - - "github.com/gocql/gocql" -) - -func main() { - // connect to the cluster - cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3") - cluster.Keyspace = "example" - cluster.Consistency = gocql.Quorum - session, _ := cluster.CreateSession() - defer session.Close() - - // insert a tweet - if err := session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`, - "me", gocql.TimeUUID(), "hello world").Exec(); err != nil { - log.Fatal(err) - } - - var id gocql.UUID - var text string - - /* Search for a specific set of records whose 'timeline' column matches - * the value 'me'. The secondary index that we created earlier will be - * used for optimizing the search */ - if err := session.Query(`SELECT id, text FROM tweet WHERE timeline = ? LIMIT 1`, - "me").Consistency(gocql.One).Scan(&id, &text); err != nil { - log.Fatal(err) - } - fmt.Println("Tweet:", id, text) - - // list all tweets - iter := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?`, "me").Iter() - for iter.Scan(&id, &text) { - fmt.Println("Tweet:", id, text) - } - if err := iter.Close(); err != nil { - log.Fatal(err) - } -} -``` - -Data Binding ------------- - -There are various ways to bind application level data structures to CQL statements: - -* You can write the data binding by hand, as outlined in the Tweet example. This provides you with the greatest flexibility, but it does mean that you need to keep your application code in sync with your Cassandra schema. -* You can dynamically marshal an entire query result into an `[]map[string]interface{}` using the `SliceMap()` API. This returns a slice of row maps keyed by CQL column names. 
This method requires no special interaction with the gocql API, but it does require your application to be able to deal with a key value view of your data. -* As a refinement on the `SliceMap()` API you can also call `MapScan()` which returns `map[string]interface{}` instances in a row by row fashion. -* The `Bind()` API provides a client app with a low level mechanism to introspect query meta data and extract appropriate field values from application level data structures. -* The [gocqlx](https://github.com/scylladb/gocqlx) package is an idiomatic extension to gocql that provides usability features. With gocqlx you can bind the query parameters from maps and structs, use named query parameters (:identifier) and scan the query results into structs and slices. It comes with a fluent and flexible CQL query builder that supports full CQL spec, including BATCH statements and custom functions. -* Building on top of the gocql driver, [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement. -* Another external project that layers on top of gocql is [cqlc](http://relops.com/cqlc) which generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax. -* [gocassa](https://github.com/hailocab/gocassa) is an external project that layers on top of gocql to provide convenient query building and data binding. -* [gocqltable](https://github.com/kristoiv/gocqltable) provides an ORM-style convenience layer to make CRUD operations with gocql easier. - -Ecosystem ---------- - -The following community maintained tools are known to integrate with gocql: - -* [gocqlx](https://github.com/scylladb/gocqlx) is a gocql extension that automates data binding, adds named queries support, provides flexible query builders and plays well with gocql. -* [journey](https://github.com/db-journey/journey) is a migration tool with Cassandra support. -* [negronicql](https://github.com/mikebthun/negronicql) is gocql middleware for Negroni. -* [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement. -* [cqlc](http://relops.com/cqlc) generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax. -* [gocassa](https://github.com/hailocab/gocassa) provides query building, adds data binding, and provides easy-to-use "recipe" tables for common query use-cases. -* [gocqltable](https://github.com/kristoiv/gocqltable) is a wrapper around gocql that aims to simplify common operations. -* [gockle](https://github.com/willfaught/gockle) provides simple, mockable interfaces that wrap gocql types -* [scylladb](https://github.com/scylladb/scylla) is a fast Apache Cassandra-compatible NoSQL database - -Other Projects --------------- - -* [gocqldriver](https://github.com/tux21b/gocqldriver) is the predecessor of gocql based on Go's `database/sql` package. This project isn't maintained anymore, because Cassandra wasn't a good fit for the traditional `database/sql` API. Use this package instead. - -SEO ---- - -For some reason, when you Google `golang cassandra`, this project doesn't feature very highly in the result list. But if you Google `go cassandra`, then we're a bit higher up the list. So this is note to try to convince Google that golang is an alias for Go. - -License -------- - -> Copyright (c) 2012-2016 The gocql Authors. 
All rights reserved. -> Use of this source code is governed by a BSD-style -> license that can be found in the LICENSE file. diff --git a/vendor/github.com/gocql/gocql/address_translators.go b/vendor/github.com/gocql/gocql/address_translators.go deleted file mode 100644 index 6638bcaa83..0000000000 --- a/vendor/github.com/gocql/gocql/address_translators.go +++ /dev/null @@ -1,26 +0,0 @@ -package gocql - -import "net" - -// AddressTranslator provides a way to translate node addresses (and ports) that are -// discovered or received as a node event. This can be useful in an ec2 environment, -// for instance, to translate public IPs to private IPs. -type AddressTranslator interface { - // Translate will translate the provided address and/or port to another - // address and/or port. If no translation is possible, Translate will return the - // address and port provided to it. - Translate(addr net.IP, port int) (net.IP, int) -} - -type AddressTranslatorFunc func(addr net.IP, port int) (net.IP, int) - -func (fn AddressTranslatorFunc) Translate(addr net.IP, port int) (net.IP, int) { - return fn(addr, port) -} - -// IdentityTranslator will do nothing but return what it was provided. It is essentially a no-op. -func IdentityTranslator() AddressTranslator { - return AddressTranslatorFunc(func(addr net.IP, port int) (net.IP, int) { - return addr, port - }) -} diff --git a/vendor/github.com/gocql/gocql/cluster.go b/vendor/github.com/gocql/gocql/cluster.go deleted file mode 100644 index b034270642..0000000000 --- a/vendor/github.com/gocql/gocql/cluster.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "errors" - "net" - "time" -) - -// PoolConfig configures the connection pool used by the driver, it defaults to -// using a round-robin host selection policy and a round-robin connection selection -// policy for each host. -type PoolConfig struct { - // HostSelectionPolicy sets the policy for selecting which host to use for a - // given query (default: RoundRobinHostPolicy()) - HostSelectionPolicy HostSelectionPolicy -} - -func (p PoolConfig) buildPool(session *Session) *policyConnPool { - return newPolicyConnPool(session) -} - -// ClusterConfig is a struct to configure the default cluster implementation -// of gocql. It has a variety of attributes that can be used to modify the -// behavior to fit the most common use cases. Applications that require a -// different setup must implement their own cluster. -type ClusterConfig struct { - // addresses for the initial connections. It is recommended to use the value set in - // the Cassandra config for broadcast_address or listen_address, an IP address not - // a domain name. This is because events from Cassandra will use the configured IP - // address, which is used to index connected hosts. If the domain name specified - // resolves to more than 1 IP address then the driver may connect multiple times to - // the same host, and will not mark the node being down or up from events. - Hosts []string - CQLVersion string // CQL version (default: 3.0.0) - - // ProtoVersion sets the version of the native protocol to use, this will - // enable features in the driver for specific protocol versions, generally this - // should be set to a known version (2,3,4) for the cluster being connected to. 
- // - // If it is 0 or unset (the default) then the driver will attempt to discover the - // highest supported protocol for the cluster. In clusters with nodes of different - // versions the protocol selected is not defined (ie, it can be any of the supported in the cluster) - ProtoVersion int - Timeout time.Duration // connection timeout (default: 600ms) - ConnectTimeout time.Duration // initial connection timeout, used during initial dial to server (default: 600ms) - Port int // port (default: 9042) - Keyspace string // initial keyspace (optional) - NumConns int // number of connections per host (default: 2) - Consistency Consistency // default consistency level (default: Quorum) - Compressor Compressor // compression algorithm (default: nil) - Authenticator Authenticator // authenticator (default: nil) - RetryPolicy RetryPolicy // Default retry policy to use for queries (default: 0) - SocketKeepalive time.Duration // The keepalive period to use, enabled if > 0 (default: 0) - MaxPreparedStmts int // Sets the maximum cache size for prepared statements globally for gocql (default: 1000) - MaxRoutingKeyInfo int // Sets the maximum cache size for query info about statements for each session (default: 1000) - PageSize int // Default page size to use for created sessions (default: 5000) - SerialConsistency SerialConsistency // Sets the consistency for the serial part of queries, values can be either SERIAL or LOCAL_SERIAL (default: unset) - SslOpts *SslOptions - DefaultTimestamp bool // Sends a client side timestamp for all requests which overrides the timestamp at which it arrives at the server. (default: true, only enabled for protocol 3 and above) - // PoolConfig configures the underlying connection pool, allowing the - // configuration of host selection and connection selection policies. - PoolConfig PoolConfig - - // If not zero, gocql attempt to reconnect known DOWN nodes in every ReconnectInterval. - ReconnectInterval time.Duration - - // The maximum amount of time to wait for schema agreement in a cluster after - // receiving a schema change frame. (deault: 60s) - MaxWaitSchemaAgreement time.Duration - - // HostFilter will filter all incoming events for host, any which don't pass - // the filter will be ignored. If set will take precedence over any options set - // via Discovery - HostFilter HostFilter - - // AddressTranslator will translate addresses found on peer discovery and/or - // node change events. - AddressTranslator AddressTranslator - - // If IgnorePeerAddr is true and the address in system.peers does not match - // the supplied host by either initial hosts or discovered via events then the - // host will be replaced with the supplied address. - // - // For example if an event comes in with host=10.0.0.1 but when looking up that - // address in system.local or system.peers returns 127.0.0.1, the peer will be - // set to 10.0.0.1 which is what will be used to connect to. - IgnorePeerAddr bool - - // If DisableInitialHostLookup then the driver will not attempt to get host info - // from the system.peers table, this will mean that the driver will connect to - // hosts supplied and will not attempt to lookup the hosts information, this will - // mean that data_centre, rack and token information will not be available and as - // such host filtering and token aware query routing will not be available. 
-	DisableInitialHostLookup bool
-
-	// Configure events the driver will register for
-	Events struct {
-		// disable registering for status events (node up/down)
-		DisableNodeStatusEvents bool
-		// disable registering for topology events (node added/removed/moved)
-		DisableTopologyEvents bool
-		// disable registering for schema events (keyspace/table/function removed/created/updated)
-		DisableSchemaEvents bool
-	}
-
-	// DisableSkipMetadata will override the internal result metadata cache so that the driver does not
-	// send skip_metadata for queries, this means that the result will always contain
-	// the metadata to parse the rows and will not reuse the metadata from the prepared
-	// statement.
-	//
-	// See https://issues.apache.org/jira/browse/CASSANDRA-10786
-	DisableSkipMetadata bool
-
-	// QueryObserver will set the provided query observer on all queries created from this session.
-	// Use it to collect metrics / stats from queries by providing an implementation of QueryObserver.
-	QueryObserver QueryObserver
-
-	// BatchObserver will set the provided batch observer on all queries created from this session.
-	// Use it to collect metrics / stats from batch queries by providing an implementation of BatchObserver.
-	BatchObserver BatchObserver
-
-	// internal config for testing
-	disableControlConn bool
-}
-
-// NewCluster generates a new config for the default cluster implementation.
-//
-// The supplied hosts are used to initially connect to the cluster then the rest of
-// the ring will be automatically discovered. It is recommended to use the value set in
-// the Cassandra config for broadcast_address or listen_address, an IP address not
-// a domain name. This is because events from Cassandra will use the configured IP
-// address, which is used to index connected hosts. If the domain name specified
-// resolves to more than 1 IP address then the driver may connect multiple times to
-// the same host, and will not mark the node being down or up from events.
-func NewCluster(hosts ...string) *ClusterConfig {
-	cfg := &ClusterConfig{
-		Hosts:                  hosts,
-		CQLVersion:             "3.0.0",
-		Timeout:                600 * time.Millisecond,
-		ConnectTimeout:         600 * time.Millisecond,
-		Port:                   9042,
-		NumConns:               2,
-		Consistency:            Quorum,
-		MaxPreparedStmts:       defaultMaxPreparedStmts,
-		MaxRoutingKeyInfo:      1000,
-		PageSize:               5000,
-		DefaultTimestamp:       true,
-		MaxWaitSchemaAgreement: 60 * time.Second,
-		ReconnectInterval:      60 * time.Second,
-	}
-	return cfg
-}
-
-// CreateSession initializes the cluster based on this config and returns a
-// session object that can be used to interact with the database.
-func (cfg *ClusterConfig) CreateSession() (*Session, error) {
-	return NewSession(*cfg)
-}
-
-// translateAddressPort is a helper method that will use the given AddressTranslator,
-// if defined, to translate the given address and port into a possibly new address
-// and port. If no AddressTranslator is set, or an error occurs, the given address and
-// port will be returned.
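An AddressTranslator is wired in through the ClusterConfig field documented above. A sketch with made-up addresses, using the AddressTranslatorFunc adapter from the deleted address_translators.go to map one public IP to a private one:

```go
package main

import (
	"net"

	"github.com/gocql/gocql"
)

func main() {
	cluster := gocql.NewCluster("203.0.113.10")

	// Map a public IP to the private IP the nodes gossip internally;
	// the concrete addresses here are illustrative.
	cluster.AddressTranslator = gocql.AddressTranslatorFunc(
		func(addr net.IP, port int) (net.IP, int) {
			if addr.Equal(net.ParseIP("203.0.113.10")) {
				return net.ParseIP("10.0.0.10"), port
			}
			return addr, port // no translation possible: echo the input
		})

	session, err := cluster.CreateSession()
	if err != nil {
		panic(err)
	}
	defer session.Close()
}
```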
-func (cfg *ClusterConfig) translateAddressPort(addr net.IP, port int) (net.IP, int) { - if cfg.AddressTranslator == nil || len(addr) == 0 { - return addr, port - } - newAddr, newPort := cfg.AddressTranslator.Translate(addr, port) - if gocqlDebug { - Logger.Printf("gocql: translating address '%v:%d' to '%v:%d'", addr, port, newAddr, newPort) - } - return newAddr, newPort -} - -func (cfg *ClusterConfig) filterHost(host *HostInfo) bool { - return !(cfg.HostFilter == nil || cfg.HostFilter.Accept(host)) -} - -var ( - ErrNoHosts = errors.New("no hosts provided") - ErrNoConnectionsStarted = errors.New("no connections were made when creating the session") - ErrHostQueryFailed = errors.New("unable to populate Hosts") -) diff --git a/vendor/github.com/gocql/gocql/compressor.go b/vendor/github.com/gocql/gocql/compressor.go deleted file mode 100644 index 26853ae7f6..0000000000 --- a/vendor/github.com/gocql/gocql/compressor.go +++ /dev/null @@ -1,28 +0,0 @@ -package gocql - -import ( - "github.com/golang/snappy" -) - -type Compressor interface { - Name() string - Encode(data []byte) ([]byte, error) - Decode(data []byte) ([]byte, error) -} - -// SnappyCompressor implements the Compressor interface and can be used to -// compress incoming and outgoing frames. The snappy compression algorithm -// aims for very high speeds and reasonable compression. -type SnappyCompressor struct{} - -func (s SnappyCompressor) Name() string { - return "snappy" -} - -func (s SnappyCompressor) Encode(data []byte) ([]byte, error) { - return snappy.Encode(nil, data), nil -} - -func (s SnappyCompressor) Decode(data []byte) ([]byte, error) { - return snappy.Decode(nil, data) -} diff --git a/vendor/github.com/gocql/gocql/conn.go b/vendor/github.com/gocql/gocql/conn.go deleted file mode 100644 index d16a704503..0000000000 --- a/vendor/github.com/gocql/gocql/conn.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "bufio" - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/gocql/gocql/internal/lru" - - "github.com/gocql/gocql/internal/streams" -) - -var ( - approvedAuthenticators = [...]string{ - "org.apache.cassandra.auth.PasswordAuthenticator", - "com.instaclustr.cassandra.auth.SharedSecretAuthenticator", - "com.datastax.bdp.cassandra.auth.DseAuthenticator", - } -) - -func approve(authenticator string) bool { - for _, s := range approvedAuthenticators { - if authenticator == s { - return true - } - } - return false -} - -//JoinHostPort is a utility to return a address string that can be used -//gocql.Conn to form a connection with a host. 
-func JoinHostPort(addr string, port int) string { - addr = strings.TrimSpace(addr) - if _, _, err := net.SplitHostPort(addr); err != nil { - addr = net.JoinHostPort(addr, strconv.Itoa(port)) - } - return addr -} - -type Authenticator interface { - Challenge(req []byte) (resp []byte, auth Authenticator, err error) - Success(data []byte) error -} - -type PasswordAuthenticator struct { - Username string - Password string -} - -func (p PasswordAuthenticator) Challenge(req []byte) ([]byte, Authenticator, error) { - if !approve(string(req)) { - return nil, nil, fmt.Errorf("unexpected authenticator %q", req) - } - resp := make([]byte, 2+len(p.Username)+len(p.Password)) - resp[0] = 0 - copy(resp[1:], p.Username) - resp[len(p.Username)+1] = 0 - copy(resp[2+len(p.Username):], p.Password) - return resp, nil, nil -} - -func (p PasswordAuthenticator) Success(data []byte) error { - return nil -} - -type SslOptions struct { - *tls.Config - - // CertPath and KeyPath are optional depending on server - // config, but both fields must be omitted to avoid using a - // client certificate - CertPath string - KeyPath string - CaPath string //optional depending on server config - // If you want to verify the hostname and server cert (like a wildcard for cass cluster) then you should turn this on - // This option is basically the inverse of InSecureSkipVerify - // See InSecureSkipVerify in http://golang.org/pkg/crypto/tls/ for more info - EnableHostVerification bool -} - -type ConnConfig struct { - ProtoVersion int - CQLVersion string - Timeout time.Duration - ConnectTimeout time.Duration - Compressor Compressor - Authenticator Authenticator - Keepalive time.Duration - tlsConfig *tls.Config -} - -type ConnErrorHandler interface { - HandleError(conn *Conn, err error, closed bool) -} - -type connErrorHandlerFn func(conn *Conn, err error, closed bool) - -func (fn connErrorHandlerFn) HandleError(conn *Conn, err error, closed bool) { - fn(conn, err, closed) -} - -// If not zero, how many timeouts we will allow to occur before the connection is closed -// and restarted. This is to prevent a single query timeout from killing a connection -// which may be serving more queries just fine. -// Default is 10, should not be changed concurrently with queries. -var TimeoutLimit int64 = 10 - -// Conn is a single connection to a Cassandra node. It can be used to execute -// queries, but users are usually advised to use a more reliable, higher -// level API. -type Conn struct { - conn net.Conn - r *bufio.Reader - timeout time.Duration - cfg *ConnConfig - - headerBuf [maxFrameHeaderSize]byte - - streams *streams.IDGenerator - mu sync.RWMutex - calls map[int]*callReq - - errorHandler ConnErrorHandler - compressor Compressor - auth Authenticator - addr string - version uint8 - currentKeyspace string - - session *Session - - closed int32 - quit chan struct{} - - timeouts int64 -} - -// Connect establishes a connection to a Cassandra node. 
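PasswordAuthenticator.Challenge above lays out its response as a zero byte, the username, another zero byte, then the password. The same construction in isolation:

```go
package main

import "fmt"

// plainAuthResponse builds the SASL-PLAIN-style payload the deleted
// PasswordAuthenticator.Challenge produces: a zero byte, the username,
// another zero byte, then the password.
func plainAuthResponse(username, password string) []byte {
	resp := make([]byte, 2+len(username)+len(password))
	resp[0] = 0
	copy(resp[1:], username)
	resp[len(username)+1] = 0
	copy(resp[2+len(username):], password)
	return resp
}

func main() {
	fmt.Printf("% x\n", plainAuthResponse("cassandra", "cassandra"))
}
```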
-func (s *Session) dial(ip net.IP, port int, cfg *ConnConfig, errorHandler ConnErrorHandler) (*Conn, error) { - // TODO(zariel): remove these - if len(ip) == 0 || ip.IsUnspecified() { - panic(fmt.Sprintf("host missing connect ip address: %v", ip)) - } else if port == 0 { - panic(fmt.Sprintf("host missing port: %v", port)) - } - - var ( - err error - conn net.Conn - ) - - dialer := &net.Dialer{ - Timeout: cfg.ConnectTimeout, - } - - // TODO(zariel): handle ipv6 zone - addr := (&net.TCPAddr{IP: ip, Port: port}).String() - - if cfg.tlsConfig != nil { - // the TLS config is safe to be reused by connections but it must not - // be modified after being used. - conn, err = tls.DialWithDialer(dialer, "tcp", addr, cfg.tlsConfig) - } else { - conn, err = dialer.Dial("tcp", addr) - } - - if err != nil { - return nil, err - } - - c := &Conn{ - conn: conn, - r: bufio.NewReader(conn), - cfg: cfg, - calls: make(map[int]*callReq), - timeout: cfg.Timeout, - version: uint8(cfg.ProtoVersion), - addr: conn.RemoteAddr().String(), - errorHandler: errorHandler, - compressor: cfg.Compressor, - auth: cfg.Authenticator, - quit: make(chan struct{}), - session: s, - streams: streams.New(cfg.ProtoVersion), - } - - if cfg.Keepalive > 0 { - c.setKeepalive(cfg.Keepalive) - } - - var ( - ctx context.Context - cancel func() - ) - if cfg.ConnectTimeout > 0 { - ctx, cancel = context.WithTimeout(context.Background(), cfg.ConnectTimeout) - } else { - ctx, cancel = context.WithCancel(context.Background()) - } - defer cancel() - - frameTicker := make(chan struct{}, 1) - startupErr := make(chan error) - go func() { - for range frameTicker { - err := c.recv() - if err != nil { - select { - case startupErr <- err: - case <-ctx.Done(): - } - - return - } - } - }() - - go func() { - defer close(frameTicker) - err := c.startup(ctx, frameTicker) - select { - case startupErr <- err: - case <-ctx.Done(): - } - }() - - select { - case err := <-startupErr: - if err != nil { - c.Close() - return nil, err - } - case <-ctx.Done(): - c.Close() - return nil, errors.New("gocql: no response to connection startup within timeout") - } - - go c.serve() - - return c, nil -} - -func (c *Conn) Write(p []byte) (int, error) { - if c.timeout > 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.timeout)) - } - - return c.conn.Write(p) -} - -func (c *Conn) Read(p []byte) (n int, err error) { - const maxAttempts = 5 - - for i := 0; i < maxAttempts; i++ { - var nn int - if c.timeout > 0 { - c.conn.SetReadDeadline(time.Now().Add(c.timeout)) - } - - nn, err = io.ReadFull(c.r, p[n:]) - n += nn - if err == nil { - break - } - - if verr, ok := err.(net.Error); !ok || !verr.Temporary() { - break - } - } - - return -} - -func (c *Conn) startup(ctx context.Context, frameTicker chan struct{}) error { - m := map[string]string{ - "CQL_VERSION": c.cfg.CQLVersion, - } - - if c.compressor != nil { - m["COMPRESSION"] = c.compressor.Name() - } - - select { - case frameTicker <- struct{}{}: - case <-ctx.Done(): - return ctx.Err() - } - - framer, err := c.exec(ctx, &writeStartupFrame{opts: m}, nil) - if err != nil { - return err - } - - frame, err := framer.parseFrame() - if err != nil { - return err - } - - switch v := frame.(type) { - case error: - return v - case *readyFrame: - return nil - case *authenticateFrame: - return c.authenticateHandshake(ctx, v, frameTicker) - default: - return NewErrProtocol("Unknown type of response to startup frame: %s", v) - } -} - -func (c *Conn) authenticateHandshake(ctx context.Context, authFrame *authenticateFrame, frameTicker chan struct{}) 
error { - if c.auth == nil { - return fmt.Errorf("authentication required (using %q)", authFrame.class) - } - - resp, challenger, err := c.auth.Challenge([]byte(authFrame.class)) - if err != nil { - return err - } - - req := &writeAuthResponseFrame{data: resp} - - for { - select { - case frameTicker <- struct{}{}: - case <-ctx.Done(): - return ctx.Err() - } - - framer, err := c.exec(ctx, req, nil) - if err != nil { - return err - } - - frame, err := framer.parseFrame() - if err != nil { - return err - } - - switch v := frame.(type) { - case error: - return v - case *authSuccessFrame: - if challenger != nil { - return challenger.Success(v.data) - } - return nil - case *authChallengeFrame: - resp, challenger, err = challenger.Challenge(v.data) - if err != nil { - return err - } - - req = &writeAuthResponseFrame{ - data: resp, - } - default: - return fmt.Errorf("unknown frame response during authentication: %v", v) - } - - framerPool.Put(framer) - } -} - -func (c *Conn) closeWithError(err error) { - if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) { - return - } - - // we should attempt to deliver the error back to the caller if it - // exists - if err != nil { - c.mu.RLock() - for _, req := range c.calls { - // we need to send the error to all waiting queries, put the state - // of this conn into not active so that it can not execute any queries. - select { - case req.resp <- err: - case <-req.timeout: - } - } - c.mu.RUnlock() - } - - // if error was nil then unblock the quit channel - close(c.quit) - cerr := c.close() - - if err != nil { - c.errorHandler.HandleError(c, err, true) - } else if cerr != nil { - // TODO(zariel): is it a good idea to do this? - c.errorHandler.HandleError(c, cerr, true) - } -} - -func (c *Conn) close() error { - return c.conn.Close() -} - -func (c *Conn) Close() { - c.closeWithError(nil) -} - -// Serve starts the stream multiplexer for this connection, which is required -// to execute any queries. This method runs as long as the connection is -// open and is therefore usually called in a separate goroutine. -func (c *Conn) serve() { - var err error - for err == nil { - err = c.recv() - } - - c.closeWithError(err) -} - -func (c *Conn) discardFrame(head frameHeader) error { - _, err := io.CopyN(ioutil.Discard, c, int64(head.length)) - if err != nil { - return err - } - return nil -} - -type protocolError struct { - frame frame -} - -func (p *protocolError) Error() string { - if err, ok := p.frame.(error); ok { - return err.Error() - } - return fmt.Sprintf("gocql: received unexpected frame on stream %d: %v", p.frame.Header().stream, p.frame) -} - -func (c *Conn) recv() error { - // not safe for concurrent reads - - // read a full header, ignore timeouts, as this is being ran in a loop - // TODO: TCP level deadlines? or just query level deadlines? 
- if c.timeout > 0 { - c.conn.SetReadDeadline(time.Time{}) - } - - // were just reading headers over and over and copy bodies - head, err := readHeader(c.r, c.headerBuf[:]) - if err != nil { - return err - } - - if head.stream > c.streams.NumStreams { - return fmt.Errorf("gocql: frame header stream is beyond call exepected bounds: %d", head.stream) - } else if head.stream == -1 { - // TODO: handle cassandra event frames, we shouldnt get any currently - framer := newFramer(c, c, c.compressor, c.version) - if err := framer.readFrame(&head); err != nil { - return err - } - go c.session.handleEvent(framer) - return nil - } else if head.stream <= 0 { - // reserved stream that we dont use, probably due to a protocol error - // or a bug in Cassandra, this should be an error, parse it and return. - framer := newFramer(c, c, c.compressor, c.version) - if err := framer.readFrame(&head); err != nil { - return err - } - defer framerPool.Put(framer) - - frame, err := framer.parseFrame() - if err != nil { - return err - } - - return &protocolError{ - frame: frame, - } - } - - c.mu.RLock() - call, ok := c.calls[head.stream] - c.mu.RUnlock() - if call == nil || call.framer == nil || !ok { - Logger.Printf("gocql: received response for stream which has no handler: header=%v\n", head) - return c.discardFrame(head) - } - - err = call.framer.readFrame(&head) - if err != nil { - // only net errors should cause the connection to be closed. Though - // cassandra returning corrupt frames will be returned here as well. - if _, ok := err.(net.Error); ok { - return err - } - } - - // we either, return a response to the caller, the caller timedout, or the - // connection has closed. Either way we should never block indefinatly here - select { - case call.resp <- err: - case <-call.timeout: - c.releaseStream(head.stream) - case <-c.quit: - } - - return nil -} - -func (c *Conn) releaseStream(stream int) { - c.mu.Lock() - call := c.calls[stream] - if call != nil && stream != call.streamID { - panic(fmt.Sprintf("attempt to release streamID with ivalid stream: %d -> %+v\n", stream, call)) - } else if call == nil { - panic(fmt.Sprintf("releasing a stream not in use: %d", stream)) - } - delete(c.calls, stream) - c.mu.Unlock() - - if call.timer != nil { - call.timer.Stop() - } - - streamPool.Put(call) - c.streams.Clear(stream) -} - -func (c *Conn) handleTimeout() { - if TimeoutLimit > 0 && atomic.AddInt64(&c.timeouts, 1) > TimeoutLimit { - c.closeWithError(ErrTooManyTimeouts) - } -} - -var ( - streamPool = sync.Pool{ - New: func() interface{} { - return &callReq{ - resp: make(chan error), - } - }, - } -) - -type callReq struct { - // could use a waitgroup but this allows us to do timeouts on the read/send - resp chan error - framer *framer - timeout chan struct{} // indicates to recv() that a call has timedout - streamID int // current stream in use - - timer *time.Timer -} - -func (c *Conn) exec(ctx context.Context, req frameWriter, tracer Tracer) (*framer, error) { - // TODO: move tracer onto conn - stream, ok := c.streams.GetStream() - if !ok { - return nil, ErrNoStreams - } - - // resp is basically a waiting semaphore protecting the framer - framer := newFramer(c, c, c.compressor, c.version) - - c.mu.Lock() - call := c.calls[stream] - if call != nil { - c.mu.Unlock() - return nil, fmt.Errorf("attempting to use stream already in use: %d -> %d", stream, call.streamID) - } else { - call = streamPool.Get().(*callReq) - } - c.calls[stream] = call - - call.framer = framer - call.timeout = make(chan struct{}) - call.streamID = 
stream - c.mu.Unlock() - - if tracer != nil { - framer.trace() - } - - err := req.writeFrame(framer, stream) - if err != nil { - // closeWithError will block waiting for this stream to either receive a response - // or for us to timeout, close the timeout chan here. Im not entirely sure - // but we should not get a response after an error on the write side. - close(call.timeout) - // I think this is the correct thing to do, im not entirely sure. It is not - // ideal as readers might still get some data, but they probably wont. - // Here we need to be careful as the stream is not available and if all - // writes just timeout or fail then the pool might use this connection to - // send a frame on, with all the streams used up and not returned. - c.closeWithError(err) - return nil, err - } - - var timeoutCh <-chan time.Time - if c.timeout > 0 { - if call.timer == nil { - call.timer = time.NewTimer(0) - <-call.timer.C - } else { - if !call.timer.Stop() { - select { - case <-call.timer.C: - default: - } - } - } - - call.timer.Reset(c.timeout) - timeoutCh = call.timer.C - } - - var ctxDone <-chan struct{} - if ctx != nil { - ctxDone = ctx.Done() - } - - select { - case err := <-call.resp: - close(call.timeout) - if err != nil { - if !c.Closed() { - // if the connection is closed then we cant release the stream, - // this is because the request is still outstanding and we have - // been handed another error from another stream which caused the - // connection to close. - c.releaseStream(stream) - } - return nil, err - } - case <-timeoutCh: - close(call.timeout) - c.handleTimeout() - return nil, ErrTimeoutNoResponse - case <-ctxDone: - close(call.timeout) - return nil, ctx.Err() - case <-c.quit: - return nil, ErrConnectionClosed - } - - // dont release the stream if detect a timeout as another request can reuse - // that stream and get a response for the old request, which we have no - // easy way of detecting. - // - // Ensure that the stream is not released if there are potentially outstanding - // requests on the stream to prevent nil pointer dereferences in recv(). - defer c.releaseStream(stream) - - if v := framer.header.version.version(); v != c.version { - return nil, NewErrProtocol("unexpected protocol version in response: got %d expected %d", v, c.version) - } - - return framer, nil -} - -type preparedStatment struct { - id []byte - request preparedMetadata - response resultMetadata -} - -type inflightPrepare struct { - wg sync.WaitGroup - err error - - preparedStatment *preparedStatment -} - -func (c *Conn) prepareStatement(ctx context.Context, stmt string, tracer Tracer) (*preparedStatment, error) { - stmtCacheKey := c.session.stmtsLRU.keyFor(c.addr, c.currentKeyspace, stmt) - flight, ok := c.session.stmtsLRU.execIfMissing(stmtCacheKey, func(lru *lru.Cache) *inflightPrepare { - flight := new(inflightPrepare) - flight.wg.Add(1) - lru.Add(stmtCacheKey, flight) - return flight - }) - - if ok { - flight.wg.Wait() - return flight.preparedStatment, flight.err - } - - prep := &writePrepareFrame{ - statement: stmt, - } - - framer, err := c.exec(ctx, prep, tracer) - if err != nil { - flight.err = err - flight.wg.Done() - c.session.stmtsLRU.remove(stmtCacheKey) - return nil, err - } - - frame, err := framer.parseFrame() - if err != nil { - flight.err = err - flight.wg.Done() - return nil, err - } - - // TODO(zariel): tidy this up, simplify handling of frame parsing so its not duplicated - // everytime we need to parse a frame. 
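prepareStatement above coalesces concurrent prepares of one statement behind a single in-flight cache entry, so a hot statement is prepared once per connection rather than once per caller. A stripped-down sketch of the pattern under illustrative names (coalescer, do):

package preparedemo

import "sync"

// inflight is one pending computation; waiters block on wg until the
// first caller fills val and err.
type inflight struct {
	wg  sync.WaitGroup
	val string
	err error
}

// coalescer runs fn at most once per key at a time.
type coalescer struct {
	mu    sync.Mutex
	calls map[string]*inflight
}

func (c *coalescer) do(key string, fn func() (string, error)) (string, error) {
	c.mu.Lock()
	if f, ok := c.calls[key]; ok {
		c.mu.Unlock()
		f.wg.Wait() // another caller is already preparing this key
		return f.val, f.err
	}
	f := &inflight{}
	f.wg.Add(1)
	c.calls[key] = f
	c.mu.Unlock()

	f.val, f.err = fn()
	f.wg.Done()
	return f.val, f.err
}

As in the code above, a failed preparation should also be evicted (stmtsLRU.remove) so the next caller retries instead of inheriting a cached error.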
- if len(framer.traceID) > 0 && tracer != nil { - tracer.Trace(framer.traceID) - } - - switch x := frame.(type) { - case *resultPreparedFrame: - flight.preparedStatment = &preparedStatment{ - // defensively copy as we will recycle the underlying buffer after we - // return. - id: copyBytes(x.preparedID), - // the type info's should _not_ have a reference to the framers read buffer, - // therefore we can just copy them directly. - request: x.reqMeta, - response: x.respMeta, - } - case error: - flight.err = x - default: - flight.err = NewErrProtocol("Unknown type in response to prepare frame: %s", x) - } - flight.wg.Done() - - if flight.err != nil { - c.session.stmtsLRU.remove(stmtCacheKey) - } - - framerPool.Put(framer) - - return flight.preparedStatment, flight.err -} - -func marshalQueryValue(typ TypeInfo, value interface{}, dst *queryValues) error { - if named, ok := value.(*namedValue); ok { - dst.name = named.name - value = named.value - } - - if _, ok := value.(unsetColumn); !ok { - val, err := Marshal(typ, value) - if err != nil { - return err - } - - dst.value = val - } else { - dst.isUnset = true - } - - return nil -} - -func (c *Conn) executeQuery(qry *Query) *Iter { - params := queryParams{ - consistency: qry.cons, - } - - // frame checks that it is not 0 - params.serialConsistency = qry.serialCons - params.defaultTimestamp = qry.defaultTimestamp - params.defaultTimestampValue = qry.defaultTimestampValue - - if len(qry.pageState) > 0 { - params.pagingState = qry.pageState - } - if qry.pageSize > 0 { - params.pageSize = qry.pageSize - } - - var ( - frame frameWriter - info *preparedStatment - ) - - if qry.shouldPrepare() { - // Prepare all DML queries. Other queries can not be prepared. - var err error - info, err = c.prepareStatement(qry.context, qry.stmt, qry.trace) - if err != nil { - return &Iter{err: err} - } - - var values []interface{} - - if qry.binding == nil { - values = qry.values - } else { - values, err = qry.binding(&QueryInfo{ - Id: info.id, - Args: info.request.columns, - Rval: info.response.columns, - PKeyColumns: info.request.pkeyColumns, - }) - - if err != nil { - return &Iter{err: err} - } - } - - if len(values) != info.request.actualColCount { - return &Iter{err: fmt.Errorf("gocql: expected %d values send got %d", info.request.actualColCount, len(values))} - } - - params.values = make([]queryValues, len(values)) - for i := 0; i < len(values); i++ { - v := ¶ms.values[i] - value := values[i] - typ := info.request.columns[i].TypeInfo - if err := marshalQueryValue(typ, value, v); err != nil { - return &Iter{err: err} - } - } - - params.skipMeta = !(c.session.cfg.DisableSkipMetadata || qry.disableSkipMetadata) - - frame = &writeExecuteFrame{ - preparedID: info.id, - params: params, - } - } else { - frame = &writeQueryFrame{ - statement: qry.stmt, - params: params, - } - } - - framer, err := c.exec(qry.context, frame, qry.trace) - if err != nil { - return &Iter{err: err} - } - - resp, err := framer.parseFrame() - if err != nil { - return &Iter{err: err} - } - - if len(framer.traceID) > 0 && qry.trace != nil { - qry.trace.Trace(framer.traceID) - } - - switch x := resp.(type) { - case *resultVoidFrame: - return &Iter{framer: framer} - case *resultRowsFrame: - iter := &Iter{ - meta: x.meta, - framer: framer, - numRows: x.numRows, - } - - if params.skipMeta { - if info != nil { - iter.meta = info.response - iter.meta.pagingState = x.meta.pagingState - } else { - return &Iter{framer: framer, err: errors.New("gocql: did not receive metadata but prepared info is nil")} - } 
- } else { - iter.meta = x.meta - } - - if len(x.meta.pagingState) > 0 && !qry.disableAutoPage { - iter.next = &nextIter{ - qry: *qry, - pos: int((1 - qry.prefetch) * float64(x.numRows)), - conn: c, - } - - iter.next.qry.pageState = copyBytes(x.meta.pagingState) - if iter.next.pos < 1 { - iter.next.pos = 1 - } - } - - return iter - case *resultKeyspaceFrame: - return &Iter{framer: framer} - case *schemaChangeKeyspace, *schemaChangeTable, *schemaChangeFunction, *schemaChangeAggregate, *schemaChangeType: - iter := &Iter{framer: framer} - if err := c.awaitSchemaAgreement(); err != nil { - // TODO: should have this behind a flag - Logger.Println(err) - } - // dont return an error from this, might be a good idea to give a warning - // though. The impact of this returning an error would be that the cluster - // is not consistent with regards to its schema. - return iter - case *RequestErrUnprepared: - stmtCacheKey := c.session.stmtsLRU.keyFor(c.addr, c.currentKeyspace, qry.stmt) - if c.session.stmtsLRU.remove(stmtCacheKey) { - return c.executeQuery(qry) - } - - return &Iter{err: x, framer: framer} - case error: - return &Iter{err: x, framer: framer} - default: - return &Iter{ - err: NewErrProtocol("Unknown type in response to execute query (%T): %s", x, x), - framer: framer, - } - } -} - -func (c *Conn) Pick(qry *Query) *Conn { - if c.Closed() { - return nil - } - return c -} - -func (c *Conn) Closed() bool { - return atomic.LoadInt32(&c.closed) == 1 -} - -func (c *Conn) Address() string { - return c.addr -} - -func (c *Conn) AvailableStreams() int { - return c.streams.Available() -} - -func (c *Conn) UseKeyspace(keyspace string) error { - q := &writeQueryFrame{statement: `USE "` + keyspace + `"`} - q.params.consistency = Any - - framer, err := c.exec(context.Background(), q, nil) - if err != nil { - return err - } - - resp, err := framer.parseFrame() - if err != nil { - return err - } - - switch x := resp.(type) { - case *resultKeyspaceFrame: - case error: - return x - default: - return NewErrProtocol("unknown frame in response to USE: %v", x) - } - - c.currentKeyspace = keyspace - - return nil -} - -func (c *Conn) executeBatch(batch *Batch) *Iter { - if c.version == protoVersion1 { - return &Iter{err: ErrUnsupported} - } - - n := len(batch.Entries) - req := &writeBatchFrame{ - typ: batch.Type, - statements: make([]batchStatment, n), - consistency: batch.Cons, - serialConsistency: batch.serialCons, - defaultTimestamp: batch.defaultTimestamp, - defaultTimestampValue: batch.defaultTimestampValue, - } - - stmts := make(map[string]string, len(batch.Entries)) - - for i := 0; i < n; i++ { - entry := &batch.Entries[i] - b := &req.statements[i] - if len(entry.Args) > 0 || entry.binding != nil { - info, err := c.prepareStatement(batch.context, entry.Stmt, nil) - if err != nil { - return &Iter{err: err} - } - - var values []interface{} - if entry.binding == nil { - values = entry.Args - } else { - values, err = entry.binding(&QueryInfo{ - Id: info.id, - Args: info.request.columns, - Rval: info.response.columns, - PKeyColumns: info.request.pkeyColumns, - }) - if err != nil { - return &Iter{err: err} - } - } - - if len(values) != info.request.actualColCount { - return &Iter{err: fmt.Errorf("gocql: batch statement %d expected %d values send got %d", i, info.request.actualColCount, len(values))} - } - - b.preparedID = info.id - stmts[string(info.id)] = entry.Stmt - - b.values = make([]queryValues, info.request.actualColCount) - - for j := 0; j < info.request.actualColCount; j++ { - v := &b.values[j] - value 
:= values[j] - typ := info.request.columns[j].TypeInfo - if err := marshalQueryValue(typ, value, v); err != nil { - return &Iter{err: err} - } - } - } else { - b.statement = entry.Stmt - } - } - - // TODO: should batch support tracing? - framer, err := c.exec(batch.context, req, nil) - if err != nil { - return &Iter{err: err} - } - - resp, err := framer.parseFrame() - if err != nil { - return &Iter{err: err, framer: framer} - } - - switch x := resp.(type) { - case *resultVoidFrame: - framerPool.Put(framer) - return &Iter{} - case *RequestErrUnprepared: - stmt, found := stmts[string(x.StatementId)] - if found { - key := c.session.stmtsLRU.keyFor(c.addr, c.currentKeyspace, stmt) - c.session.stmtsLRU.remove(key) - } - - framerPool.Put(framer) - - if found { - return c.executeBatch(batch) - } else { - return &Iter{err: x, framer: framer} - } - case *resultRowsFrame: - iter := &Iter{ - meta: x.meta, - framer: framer, - numRows: x.numRows, - } - - return iter - case error: - return &Iter{err: x, framer: framer} - default: - return &Iter{err: NewErrProtocol("Unknown type in response to batch statement: %s", x), framer: framer} - } -} - -func (c *Conn) setKeepalive(d time.Duration) error { - if tc, ok := c.conn.(*net.TCPConn); ok { - err := tc.SetKeepAlivePeriod(d) - if err != nil { - return err - } - - return tc.SetKeepAlive(true) - } - - return nil -} - -func (c *Conn) query(statement string, values ...interface{}) (iter *Iter) { - q := c.session.Query(statement, values...).Consistency(One) - return c.executeQuery(q) -} - -func (c *Conn) awaitSchemaAgreement() (err error) { - const ( - peerSchemas = "SELECT schema_version, peer FROM system.peers" - localSchemas = "SELECT schema_version FROM system.local WHERE key='local'" - ) - - var versions map[string]struct{} - - endDeadline := time.Now().Add(c.session.cfg.MaxWaitSchemaAgreement) - for time.Now().Before(endDeadline) { - iter := c.query(peerSchemas) - - versions = make(map[string]struct{}) - - var schemaVersion string - var peer string - for iter.Scan(&schemaVersion, &peer) { - if schemaVersion == "" { - Logger.Printf("skipping peer entry with empty schema_version: peer=%q", peer) - continue - } - - versions[schemaVersion] = struct{}{} - schemaVersion = "" - } - - if err = iter.Close(); err != nil { - goto cont - } - - iter = c.query(localSchemas) - for iter.Scan(&schemaVersion) { - versions[schemaVersion] = struct{}{} - schemaVersion = "" - } - - if err = iter.Close(); err != nil { - goto cont - } - - if len(versions) <= 1 { - return nil - } - - cont: - time.Sleep(200 * time.Millisecond) - } - - if err != nil { - return - } - - schemas := make([]string, 0, len(versions)) - for schema := range versions { - schemas = append(schemas, schema) - } - - // not exported - return fmt.Errorf("gocql: cluster schema versions not consistent: %+v", schemas) -} - -const localHostInfo = "SELECT * FROM system.local WHERE key='local'" - -func (c *Conn) localHostInfo() (*HostInfo, error) { - row, err := c.query(localHostInfo).rowMap() - if err != nil { - return nil, err - } - - port := c.conn.RemoteAddr().(*net.TCPAddr).Port - - // TODO(zariel): avoid doing this here - host, err := c.session.hostInfoFromMap(row, port) - if err != nil { - return nil, err - } - - return c.session.ring.addOrUpdate(host), nil -} - -var ( - ErrQueryArgLength = errors.New("gocql: query argument length mismatch") - ErrTimeoutNoResponse = errors.New("gocql: no response received from cassandra within timeout period") - ErrTooManyTimeouts = errors.New("gocql: too many query timeouts on 
the connection") - ErrConnectionClosed = errors.New("gocql: connection closed waiting for response") - ErrNoStreams = errors.New("gocql: no streams available on connection") -) diff --git a/vendor/github.com/gocql/gocql/connectionpool.go b/vendor/github.com/gocql/gocql/connectionpool.go deleted file mode 100644 index 7ea14c81f0..0000000000 --- a/vendor/github.com/gocql/gocql/connectionpool.go +++ /dev/null @@ -1,571 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "sync" - "sync/atomic" - "time" -) - -// interface to implement to receive the host information -type SetHosts interface { - SetHosts(hosts []*HostInfo) -} - -// interface to implement to receive the partitioner value -type SetPartitioner interface { - SetPartitioner(partitioner string) -} - -func setupTLSConfig(sslOpts *SslOptions) (*tls.Config, error) { - if sslOpts.Config == nil { - sslOpts.Config = &tls.Config{} - } - - // ca cert is optional - if sslOpts.CaPath != "" { - if sslOpts.RootCAs == nil { - sslOpts.RootCAs = x509.NewCertPool() - } - - pem, err := ioutil.ReadFile(sslOpts.CaPath) - if err != nil { - return nil, fmt.Errorf("connectionpool: unable to open CA certs: %v", err) - } - - if !sslOpts.RootCAs.AppendCertsFromPEM(pem) { - return nil, errors.New("connectionpool: failed parsing or CA certs") - } - } - - if sslOpts.CertPath != "" || sslOpts.KeyPath != "" { - mycert, err := tls.LoadX509KeyPair(sslOpts.CertPath, sslOpts.KeyPath) - if err != nil { - return nil, fmt.Errorf("connectionpool: unable to load X509 key pair: %v", err) - } - sslOpts.Certificates = append(sslOpts.Certificates, mycert) - } - - sslOpts.InsecureSkipVerify = !sslOpts.EnableHostVerification - - return sslOpts.Config, nil -} - -type policyConnPool struct { - session *Session - - port int - numConns int - keyspace string - - mu sync.RWMutex - hostConnPools map[string]*hostConnPool - - endpoints []string -} - -func connConfig(cfg *ClusterConfig) (*ConnConfig, error) { - var ( - err error - tlsConfig *tls.Config - ) - - // TODO(zariel): move tls config setup into session init. 
- if cfg.SslOpts != nil { - tlsConfig, err = setupTLSConfig(cfg.SslOpts) - if err != nil { - return nil, err - } - } - - return &ConnConfig{ - ProtoVersion: cfg.ProtoVersion, - CQLVersion: cfg.CQLVersion, - Timeout: cfg.Timeout, - ConnectTimeout: cfg.ConnectTimeout, - Compressor: cfg.Compressor, - Authenticator: cfg.Authenticator, - Keepalive: cfg.SocketKeepalive, - tlsConfig: tlsConfig, - }, nil -} - -func newPolicyConnPool(session *Session) *policyConnPool { - // create the pool - pool := &policyConnPool{ - session: session, - port: session.cfg.Port, - numConns: session.cfg.NumConns, - keyspace: session.cfg.Keyspace, - hostConnPools: map[string]*hostConnPool{}, - } - - pool.endpoints = make([]string, len(session.cfg.Hosts)) - copy(pool.endpoints, session.cfg.Hosts) - - return pool -} - -func (p *policyConnPool) SetHosts(hosts []*HostInfo) { - p.mu.Lock() - defer p.mu.Unlock() - - toRemove := make(map[string]struct{}) - for addr := range p.hostConnPools { - toRemove[addr] = struct{}{} - } - - pools := make(chan *hostConnPool) - createCount := 0 - for _, host := range hosts { - if !host.IsUp() { - // don't create a connection pool for a down host - continue - } - ip := host.ConnectAddress().String() - if _, exists := p.hostConnPools[ip]; exists { - // still have this host, so don't remove it - delete(toRemove, ip) - continue - } - - createCount++ - go func(host *HostInfo) { - // create a connection pool for the host - pools <- newHostConnPool( - p.session, - host, - p.port, - p.numConns, - p.keyspace, - ) - }(host) - } - - // add created pools - for createCount > 0 { - pool := <-pools - createCount-- - if pool.Size() > 0 { - // add pool only if there a connections available - p.hostConnPools[string(pool.host.ConnectAddress())] = pool - } - } - - for addr := range toRemove { - pool := p.hostConnPools[addr] - delete(p.hostConnPools, addr) - go pool.Close() - } -} - -func (p *policyConnPool) Size() int { - p.mu.RLock() - count := 0 - for _, pool := range p.hostConnPools { - count += pool.Size() - } - p.mu.RUnlock() - - return count -} - -func (p *policyConnPool) getPool(host *HostInfo) (pool *hostConnPool, ok bool) { - ip := host.ConnectAddress().String() - p.mu.RLock() - pool, ok = p.hostConnPools[ip] - p.mu.RUnlock() - return -} - -func (p *policyConnPool) Close() { - p.mu.Lock() - defer p.mu.Unlock() - - // close the pools - for addr, pool := range p.hostConnPools { - delete(p.hostConnPools, addr) - pool.Close() - } -} - -func (p *policyConnPool) addHost(host *HostInfo) { - ip := host.ConnectAddress().String() - p.mu.Lock() - pool, ok := p.hostConnPools[ip] - if !ok { - pool = newHostConnPool( - p.session, - host, - host.Port(), // TODO: if port == 0 use pool.port? - p.numConns, - p.keyspace, - ) - - p.hostConnPools[ip] = pool - } - p.mu.Unlock() - - pool.fill() -} - -func (p *policyConnPool) removeHost(ip net.IP) { - k := ip.String() - p.mu.Lock() - pool, ok := p.hostConnPools[k] - if !ok { - p.mu.Unlock() - return - } - - delete(p.hostConnPools, k) - p.mu.Unlock() - - go pool.Close() -} - -func (p *policyConnPool) hostUp(host *HostInfo) { - // TODO(zariel): have a set of up hosts and down hosts, we can internally - // detect down hosts, then try to reconnect to them. - p.addHost(host) -} - -func (p *policyConnPool) hostDown(ip net.IP) { - // TODO(zariel): mark host as down so we can try to connect to it later, for - // now just treat it has removed. - p.removeHost(ip) -} - -// hostConnPool is a connection pool for a single host. 
-// Connection selection is based on a provided ConnSelectionPolicy -type hostConnPool struct { - session *Session - host *HostInfo - port int - addr string - size int - keyspace string - // protection for conns, closed, filling - mu sync.RWMutex - conns []*Conn - closed bool - filling bool - - pos uint32 -} - -func (h *hostConnPool) String() string { - h.mu.RLock() - defer h.mu.RUnlock() - return fmt.Sprintf("[filling=%v closed=%v conns=%v size=%v host=%v]", - h.filling, h.closed, len(h.conns), h.size, h.host) -} - -func newHostConnPool(session *Session, host *HostInfo, port, size int, - keyspace string) *hostConnPool { - - pool := &hostConnPool{ - session: session, - host: host, - port: port, - addr: (&net.TCPAddr{IP: host.ConnectAddress(), Port: host.Port()}).String(), - size: size, - keyspace: keyspace, - conns: make([]*Conn, 0, size), - filling: false, - closed: false, - } - - // the pool is not filled or connected - return pool -} - -// Pick a connection from this connection pool for the given query. -func (pool *hostConnPool) Pick() *Conn { - pool.mu.RLock() - defer pool.mu.RUnlock() - - if pool.closed { - return nil - } - - size := len(pool.conns) - if size < pool.size { - // try to fill the pool - go pool.fill() - - if size == 0 { - return nil - } - } - - pos := int(atomic.AddUint32(&pool.pos, 1) - 1) - - var ( - leastBusyConn *Conn - streamsAvailable int - ) - - // find the conn which has the most available streams, this is racy - for i := 0; i < size; i++ { - conn := pool.conns[(pos+i)%size] - if streams := conn.AvailableStreams(); streams > streamsAvailable { - leastBusyConn = conn - streamsAvailable = streams - } - } - - return leastBusyConn -} - -//Size returns the number of connections currently active in the pool -func (pool *hostConnPool) Size() int { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return len(pool.conns) -} - -//Close the connection pool -func (pool *hostConnPool) Close() { - pool.mu.Lock() - - if pool.closed { - pool.mu.Unlock() - return - } - pool.closed = true - - // ensure we dont try to reacquire the lock in handleError - // TODO: improve this as the following can happen - // 1) we have locked pool.mu write lock - // 2) conn.Close calls conn.closeWithError(nil) - // 3) conn.closeWithError calls conn.Close() which returns an error - // 4) conn.closeWithError calls pool.HandleError with the error from conn.Close - // 5) pool.HandleError tries to lock pool.mu - // deadlock - - // empty the pool - conns := pool.conns - pool.conns = nil - - pool.mu.Unlock() - - // close the connections - for _, conn := range conns { - conn.Close() - } -} - -// Fill the connection pool -func (pool *hostConnPool) fill() { - pool.mu.RLock() - // avoid filling a closed pool, or concurrent filling - if pool.closed || pool.filling { - pool.mu.RUnlock() - return - } - - // determine the filling work to be done - startCount := len(pool.conns) - fillCount := pool.size - startCount - - // avoid filling a full (or overfull) pool - if fillCount <= 0 { - pool.mu.RUnlock() - return - } - - // switch from read to write lock - pool.mu.RUnlock() - pool.mu.Lock() - - // double check everything since the lock was released - startCount = len(pool.conns) - fillCount = pool.size - startCount - if pool.closed || pool.filling || fillCount <= 0 { - // looks like another goroutine already beat this - // goroutine to the filling - pool.mu.Unlock() - return - } - - // ok fill the pool - pool.filling = true - - // allow others to access the pool while filling - pool.mu.Unlock() - // only this 
goroutine should make calls to fill/empty the pool at this - // point until after this routine or its subordinates calls - // fillingStopped - - // fill only the first connection synchronously - if startCount == 0 { - err := pool.connect() - pool.logConnectErr(err) - - if err != nil { - // probably unreachable host - pool.fillingStopped(true) - - // this is call with the connection pool mutex held, this call will - // then recursively try to lock it again. FIXME - go pool.session.handleNodeDown(pool.host.ConnectAddress(), pool.port) - return - } - - // filled one - fillCount-- - } - - // fill the rest of the pool asynchronously - go func() { - err := pool.connectMany(fillCount) - - // mark the end of filling - pool.fillingStopped(err != nil) - }() -} - -func (pool *hostConnPool) logConnectErr(err error) { - if opErr, ok := err.(*net.OpError); ok && (opErr.Op == "dial" || opErr.Op == "read") { - // connection refused - // these are typical during a node outage so avoid log spam. - if gocqlDebug { - Logger.Printf("unable to dial %q: %v\n", pool.host.ConnectAddress(), err) - } - } else if err != nil { - // unexpected error - Logger.Printf("error: failed to connect to %s due to error: %v", pool.addr, err) - } -} - -// transition back to a not-filling state. -func (pool *hostConnPool) fillingStopped(hadError bool) { - if hadError { - // wait for some time to avoid back-to-back filling - // this provides some time between failed attempts - // to fill the pool for the host to recover - time.Sleep(time.Duration(rand.Int31n(100)+31) * time.Millisecond) - } - - pool.mu.Lock() - pool.filling = false - pool.mu.Unlock() -} - -// connectMany creates new connections concurrent. -func (pool *hostConnPool) connectMany(count int) error { - if count == 0 { - return nil - } - var ( - wg sync.WaitGroup - mu sync.Mutex - connectErr error - ) - wg.Add(count) - for i := 0; i < count; i++ { - go func() { - defer wg.Done() - err := pool.connect() - pool.logConnectErr(err) - if err != nil { - mu.Lock() - connectErr = err - mu.Unlock() - } - }() - } - // wait for all connections are done - wg.Wait() - - return connectErr -} - -// create a new connection to the host and add it to the pool -func (pool *hostConnPool) connect() (err error) { - // TODO: provide a more robust connection retry mechanism, we should also - // be able to detect hosts that come up by trying to connect to downed ones. - const maxAttempts = 3 - // try to connect - var conn *Conn - for i := 0; i < maxAttempts; i++ { - conn, err = pool.session.connect(pool.host, pool) - if err == nil { - break - } - if opErr, isOpErr := err.(*net.OpError); isOpErr { - // if the error is not a temporary error (ex: network unreachable) don't - // retry - if !opErr.Temporary() { - break - } - } - } - - if err != nil { - return err - } - - if pool.keyspace != "" { - // set the keyspace - if err = conn.UseKeyspace(pool.keyspace); err != nil { - conn.Close() - return err - } - } - - // add the Conn to the pool - pool.mu.Lock() - defer pool.mu.Unlock() - - if pool.closed { - conn.Close() - return nil - } - - pool.conns = append(pool.conns, conn) - - return nil -} - -// handle any error from a Conn -func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) { - if !closed { - // still an open connection, so continue using it - return - } - - // TODO: track the number of errors per host and detect when a host is dead, - // then also have something which can detect when a host comes back. 
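connectMany above fans the dials out across goroutines and records the last error seen, so the caller learns whether the fill fully succeeded. The fan-out in isolation, with the dial function injected (dialMany is an illustrative name):

package pooldemo

import "sync"

// dialMany opens count connections concurrently and returns the last
// error any attempt produced, or nil when all of them succeeded.
func dialMany(count int, dial func() error) error {
	var (
		wg  sync.WaitGroup
		mu  sync.Mutex
		err error
	)
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func() {
			defer wg.Done()
			if e := dial(); e != nil {
				mu.Lock()
				err = e // keep the most recent failure
				mu.Unlock()
			}
		}()
	}
	wg.Wait() // block until every attempt has finished
	return err
}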
- pool.mu.Lock() - defer pool.mu.Unlock() - - if pool.closed { - // pool closed - return - } - - // find the connection index - for i, candidate := range pool.conns { - if candidate == conn { - // remove the connection, not preserving order - pool.conns[i], pool.conns = pool.conns[len(pool.conns)-1], pool.conns[:len(pool.conns)-1] - - // lost a connection, so fill the pool - go pool.fill() - break - } - } -} diff --git a/vendor/github.com/gocql/gocql/control.go b/vendor/github.com/gocql/gocql/control.go deleted file mode 100644 index 482782393d..0000000000 --- a/vendor/github.com/gocql/gocql/control.go +++ /dev/null @@ -1,480 +0,0 @@ -package gocql - -import ( - "context" - crand "crypto/rand" - "errors" - "fmt" - "math/rand" - "net" - "os" - "regexp" - "strconv" - "sync" - "sync/atomic" - "time" -) - -var ( - randr *rand.Rand - mutRandr sync.Mutex -) - -func init() { - b := make([]byte, 4) - if _, err := crand.Read(b); err != nil { - panic(fmt.Sprintf("unable to seed random number generator: %v", err)) - } - - randr = rand.New(rand.NewSource(int64(readInt(b)))) -} - -// Ensure that the atomic variable is aligned to a 64bit boundary -// so that atomic operations can be applied on 32bit architectures. -type controlConn struct { - started int32 - reconnecting int32 - - session *Session - conn atomic.Value - - retry RetryPolicy - - quit chan struct{} -} - -func createControlConn(session *Session) *controlConn { - control := &controlConn{ - session: session, - quit: make(chan struct{}), - retry: &SimpleRetryPolicy{NumRetries: 3}, - } - - control.conn.Store((*connHost)(nil)) - - return control -} - -func (c *controlConn) heartBeat() { - if !atomic.CompareAndSwapInt32(&c.started, 0, 1) { - return - } - - sleepTime := 1 * time.Second - timer := time.NewTimer(sleepTime) - defer timer.Stop() - - for { - timer.Reset(sleepTime) - - select { - case <-c.quit: - return - case <-timer.C: - } - - resp, err := c.writeFrame(&writeOptionsFrame{}) - if err != nil { - goto reconn - } - - switch resp.(type) { - case *supportedFrame: - // Everything ok - sleepTime = 5 * time.Second - continue - case error: - goto reconn - default: - panic(fmt.Sprintf("gocql: unknown frame in response to options: %T", resp)) - } - - reconn: - // try to connect a bit faster - sleepTime = 1 * time.Second - c.reconnect(true) - continue - } -} - -var hostLookupPreferV4 = os.Getenv("GOCQL_HOST_LOOKUP_PREFER_V4") == "true" - -func hostInfo(addr string, defaultPort int) ([]*HostInfo, error) { - var port int - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - host = addr - port = defaultPort - } else { - port, err = strconv.Atoi(portStr) - if err != nil { - return nil, err - } - } - - var hosts []*HostInfo - - // Check if host is a literal IP address - if ip := net.ParseIP(host); ip != nil { - hosts = append(hosts, &HostInfo{connectAddress: ip, port: port}) - return hosts, nil - } - - // Look up host in DNS - ips, err := net.LookupIP(host) - if err != nil { - return nil, err - } else if len(ips) == 0 { - return nil, fmt.Errorf("No IP's returned from DNS lookup for %q", addr) - } - - // Filter to v4 addresses if any present - if hostLookupPreferV4 { - var preferredIPs []net.IP - for _, v := range ips { - if v4 := v.To4(); v4 != nil { - preferredIPs = append(preferredIPs, v4) - } - } - if len(preferredIPs) != 0 { - ips = preferredIPs - } - } - - for _, ip := range ips { - hosts = append(hosts, &HostInfo{connectAddress: ip, port: port}) - } - - return hosts, nil -} - -func shuffleHosts(hosts []*HostInfo) []*HostInfo { - 
mutRandr.Lock() - perm := randr.Perm(len(hosts)) - mutRandr.Unlock() - shuffled := make([]*HostInfo, len(hosts)) - - for i, host := range hosts { - shuffled[perm[i]] = host - } - - return shuffled -} - -func (c *controlConn) shuffleDial(endpoints []*HostInfo) (*Conn, error) { - // shuffle endpoints so not all drivers will connect to the same initial - // node. - shuffled := shuffleHosts(endpoints) - - var err error - for _, host := range shuffled { - var conn *Conn - conn, err = c.session.connect(host, c) - if err == nil { - return conn, nil - } - - Logger.Printf("gocql: unable to dial control conn %v: %v\n", host.ConnectAddress(), err) - } - - return nil, err -} - -// this is going to be version dependant and a nightmare to maintain :( -var protocolSupportRe = regexp.MustCompile(`the lowest supported version is \d+ and the greatest is (\d+)$`) - -func parseProtocolFromError(err error) int { - // I really wish this had the actual info in the error frame... - matches := protocolSupportRe.FindAllStringSubmatch(err.Error(), -1) - if len(matches) != 1 || len(matches[0]) != 2 { - if verr, ok := err.(*protocolError); ok { - return int(verr.frame.Header().version.version()) - } - return 0 - } - - max, err := strconv.Atoi(matches[0][1]) - if err != nil { - return 0 - } - - return max -} - -func (c *controlConn) discoverProtocol(hosts []*HostInfo) (int, error) { - hosts = shuffleHosts(hosts) - - connCfg := *c.session.connCfg - connCfg.ProtoVersion = 4 // TODO: define maxProtocol - - handler := connErrorHandlerFn(func(c *Conn, err error, closed bool) { - // we should never get here, but if we do it means we connected to a - // host successfully which means our attempted protocol version worked - if !closed { - c.Close() - } - }) - - var err error - for _, host := range hosts { - var conn *Conn - conn, err = c.session.dial(host.ConnectAddress(), host.Port(), &connCfg, handler) - if conn != nil { - conn.Close() - } - - if err == nil { - return connCfg.ProtoVersion, nil - } - - if proto := parseProtocolFromError(err); proto > 0 { - return proto, nil - } - } - - return 0, err -} - -func (c *controlConn) connect(hosts []*HostInfo) error { - if len(hosts) == 0 { - return errors.New("control: no endpoints specified") - } - - conn, err := c.shuffleDial(hosts) - if err != nil { - return fmt.Errorf("control: unable to connect to initial hosts: %v", err) - } - - if err := c.setupConn(conn); err != nil { - conn.Close() - return fmt.Errorf("control: unable to setup connection: %v", err) - } - - // we could fetch the initial ring here and update initial host data. So that - // when we return from here we have a ring topology ready to go. - - go c.heartBeat() - - return nil -} - -type connHost struct { - conn *Conn - host *HostInfo -} - -func (c *controlConn) setupConn(conn *Conn) error { - if err := c.registerEvents(conn); err != nil { - conn.Close() - return err - } - - // TODO(zariel): do we need to fetch host info everytime - // the control conn connects? Surely we have it cached? 
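parseProtocolFromError above recovers the server's highest supported protocol version by matching the error text, since the error frame carries no structured version field. The extraction on its own, reusing the same pattern (maxVersionFromError is an illustrative name):

package protodemo

import (
	"regexp"
	"strconv"
)

// versionRe captures the greatest version from Cassandra's complaint
// about an unsupported protocol version.
var versionRe = regexp.MustCompile(`the lowest supported version is \d+ and the greatest is (\d+)$`)

// maxVersionFromError returns that version, or 0 when the message
// does not match the known wording.
func maxVersionFromError(msg string) int {
	m := versionRe.FindStringSubmatch(msg)
	if len(m) != 2 {
		return 0
	}
	v, err := strconv.Atoi(m[1])
	if err != nil {
		return 0
	}
	return v
}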
- host, err := conn.localHostInfo() - if err != nil { - return err - } - - ch := &connHost{ - conn: conn, - host: host, - } - - c.conn.Store(ch) - c.session.handleNodeUp(host.ConnectAddress(), host.Port(), false) - - return nil -} - -func (c *controlConn) registerEvents(conn *Conn) error { - var events []string - - if !c.session.cfg.Events.DisableTopologyEvents { - events = append(events, "TOPOLOGY_CHANGE") - } - if !c.session.cfg.Events.DisableNodeStatusEvents { - events = append(events, "STATUS_CHANGE") - } - if !c.session.cfg.Events.DisableSchemaEvents { - events = append(events, "SCHEMA_CHANGE") - } - - if len(events) == 0 { - return nil - } - - framer, err := conn.exec(context.Background(), - &writeRegisterFrame{ - events: events, - }, nil) - if err != nil { - return err - } - - frame, err := framer.parseFrame() - if err != nil { - return err - } else if _, ok := frame.(*readyFrame); !ok { - return fmt.Errorf("unexpected frame in response to register: got %T: %v\n", frame, frame) - } - - return nil -} - -func (c *controlConn) reconnect(refreshring bool) { - if !atomic.CompareAndSwapInt32(&c.reconnecting, 0, 1) { - return - } - defer atomic.StoreInt32(&c.reconnecting, 0) - // TODO: simplify this function, use session.ring to get hosts instead of the - // connection pool - - var host *HostInfo - ch := c.getConn() - if ch != nil { - host = ch.host - ch.conn.Close() - } - - var newConn *Conn - if host != nil { - // try to connect to the old host - conn, err := c.session.connect(host, c) - if err != nil { - // host is dead - // TODO: this is replicated in a few places - c.session.handleNodeDown(host.ConnectAddress(), host.Port()) - } else { - newConn = conn - } - } - - // TODO: should have our own round-robin for hosts so that we can try each - // in succession and guarantee that we get a different host each time. 
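reconnect above guards itself with CompareAndSwapInt32 so that overlapping triggers (a failed heartbeat, HandleError) collapse into a single attempt instead of racing. The guard in isolation (a sketch; guard and run are illustrative names):

package reconnectdemo

import "sync/atomic"

// guard lets at most one caller run fn at a time; concurrent callers
// return immediately rather than queueing behind the first.
type guard struct{ busy int32 }

func (g *guard) run(fn func()) bool {
	if !atomic.CompareAndSwapInt32(&g.busy, 0, 1) {
		return false // an attempt is already in flight
	}
	defer atomic.StoreInt32(&g.busy, 0)
	fn()
	return true
}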
- if newConn == nil { - host := c.session.ring.rrHost() - if host == nil { - c.connect(c.session.ring.endpoints) - return - } - - var err error - newConn, err = c.session.connect(host, c) - if err != nil { - // TODO: add log handler for things like this - return - } - } - - if err := c.setupConn(newConn); err != nil { - newConn.Close() - Logger.Printf("gocql: control unable to register events: %v\n", err) - return - } - - if refreshring { - c.session.hostSource.refreshRing() - } -} - -func (c *controlConn) HandleError(conn *Conn, err error, closed bool) { - if !closed { - return - } - - oldConn := c.getConn() - if oldConn.conn != conn { - return - } - - c.reconnect(false) -} - -func (c *controlConn) getConn() *connHost { - return c.conn.Load().(*connHost) -} - -func (c *controlConn) writeFrame(w frameWriter) (frame, error) { - ch := c.getConn() - if ch == nil { - return nil, errNoControl - } - - framer, err := ch.conn.exec(context.Background(), w, nil) - if err != nil { - return nil, err - } - - return framer.parseFrame() -} - -func (c *controlConn) withConnHost(fn func(*connHost) *Iter) *Iter { - const maxConnectAttempts = 5 - connectAttempts := 0 - - for i := 0; i < maxConnectAttempts; i++ { - ch := c.getConn() - if ch == nil { - if connectAttempts > maxConnectAttempts { - break - } - - connectAttempts++ - - c.reconnect(false) - continue - } - - return fn(ch) - } - - return &Iter{err: errNoControl} -} - -func (c *controlConn) withConn(fn func(*Conn) *Iter) *Iter { - return c.withConnHost(func(ch *connHost) *Iter { - return fn(ch.conn) - }) -} - -// query will return nil if the connection is closed or nil -func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter) { - q := c.session.Query(statement, values...).Consistency(One).RoutingKey([]byte{}).Trace(nil) - - for { - iter = c.withConn(func(conn *Conn) *Iter { - return conn.executeQuery(q) - }) - - if gocqlDebug && iter.err != nil { - Logger.Printf("control: error executing %q: %v\n", statement, iter.err) - } - - q.attempts++ - if iter.err == nil || !c.retry.Attempt(q) { - break - } - } - - return -} - -func (c *controlConn) awaitSchemaAgreement() error { - return c.withConn(func(conn *Conn) *Iter { - return &Iter{err: conn.awaitSchemaAgreement()} - }).err -} - -func (c *controlConn) close() { - if atomic.CompareAndSwapInt32(&c.started, 1, -1) { - c.quit <- struct{}{} - } - - ch := c.getConn() - if ch != nil { - ch.conn.Close() - } -} - -var errNoControl = errors.New("gocql: no control connection available") diff --git a/vendor/github.com/gocql/gocql/debug_off.go b/vendor/github.com/gocql/gocql/debug_off.go deleted file mode 100644 index 3af3ae0f3e..0000000000 --- a/vendor/github.com/gocql/gocql/debug_off.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !gocql_debug - -package gocql - -const gocqlDebug = false diff --git a/vendor/github.com/gocql/gocql/debug_on.go b/vendor/github.com/gocql/gocql/debug_on.go deleted file mode 100644 index e94a00ce5b..0000000000 --- a/vendor/github.com/gocql/gocql/debug_on.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build gocql_debug - -package gocql - -const gocqlDebug = true diff --git a/vendor/github.com/gocql/gocql/doc.go b/vendor/github.com/gocql/gocql/doc.go deleted file mode 100644 index f661cf65f3..0000000000 --- a/vendor/github.com/gocql/gocql/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) 2012-2015 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
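controlConn.query above re-executes a failed statement for as long as the configured RetryPolicy permits. Reduced to a plain attempt budget (retry is an illustrative helper; the real loop delegates the decision to RetryPolicy.Attempt):

package retrydemo

// retry runs op until it succeeds or attempts are exhausted, returning
// the last error observed.
func retry(attempts int, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
	}
	return err
}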
- -// Package gocql implements a fast and robust Cassandra driver for the -// Go programming language. -package gocql - -// TODO(tux21b): write more docs. diff --git a/vendor/github.com/gocql/gocql/errors.go b/vendor/github.com/gocql/gocql/errors.go deleted file mode 100644 index b87c6fac0b..0000000000 --- a/vendor/github.com/gocql/gocql/errors.go +++ /dev/null @@ -1,116 +0,0 @@ -package gocql - -import "fmt" - -const ( - errServer = 0x0000 - errProtocol = 0x000A - errCredentials = 0x0100 - errUnavailable = 0x1000 - errOverloaded = 0x1001 - errBootstrapping = 0x1002 - errTruncate = 0x1003 - errWriteTimeout = 0x1100 - errReadTimeout = 0x1200 - errReadFailure = 0x1300 - errFunctionFailure = 0x1400 - errWriteFailure = 0x1500 - errSyntax = 0x2000 - errUnauthorized = 0x2100 - errInvalid = 0x2200 - errConfig = 0x2300 - errAlreadyExists = 0x2400 - errUnprepared = 0x2500 -) - -type RequestError interface { - Code() int - Message() string - Error() string -} - -type errorFrame struct { - frameHeader - - code int - message string -} - -func (e errorFrame) Code() int { - return e.code -} - -func (e errorFrame) Message() string { - return e.message -} - -func (e errorFrame) Error() string { - return e.Message() -} - -func (e errorFrame) String() string { - return fmt.Sprintf("[error code=%x message=%q]", e.code, e.message) -} - -type RequestErrUnavailable struct { - errorFrame - Consistency Consistency - Required int - Alive int -} - -func (e *RequestErrUnavailable) String() string { - return fmt.Sprintf("[request_error_unavailable consistency=%s required=%d alive=%d]", e.Consistency, e.Required, e.Alive) -} - -type RequestErrWriteTimeout struct { - errorFrame - Consistency Consistency - Received int - BlockFor int - WriteType string -} - -type RequestErrWriteFailure struct { - errorFrame - Consistency Consistency - Received int - BlockFor int - NumFailures int - WriteType string -} - -type RequestErrReadTimeout struct { - errorFrame - Consistency Consistency - Received int - BlockFor int - DataPresent byte -} - -type RequestErrAlreadyExists struct { - errorFrame - Keyspace string - Table string -} - -type RequestErrUnprepared struct { - errorFrame - StatementId []byte -} - -type RequestErrReadFailure struct { - errorFrame - Consistency Consistency - Received int - BlockFor int - NumFailures int - DataPresent bool -} - -type RequestErrFunctionFailure struct { - errorFrame - Keyspace string - Function string - ArgTypes []string -} diff --git a/vendor/github.com/gocql/gocql/events.go b/vendor/github.com/gocql/gocql/events.go deleted file mode 100644 index 5b65f4d646..0000000000 --- a/vendor/github.com/gocql/gocql/events.go +++ /dev/null @@ -1,295 +0,0 @@ -package gocql - -import ( - "net" - "sync" - "time" -) - -type eventDebouncer struct { - name string - timer *time.Timer - mu sync.Mutex - events []frame - - callback func([]frame) - quit chan struct{} -} - -func newEventDebouncer(name string, eventHandler func([]frame)) *eventDebouncer { - e := &eventDebouncer{ - name: name, - quit: make(chan struct{}), - timer: time.NewTimer(eventDebounceTime), - callback: eventHandler, - } - e.timer.Stop() - go e.flusher() - - return e -} - -func (e *eventDebouncer) stop() { - e.quit <- struct{}{} // sync with flusher - close(e.quit) -} - -func (e *eventDebouncer) flusher() { - for { - select { - case <-e.timer.C: - e.mu.Lock() - e.flush() - e.mu.Unlock() - case <-e.quit: - return - } - } -} - -const ( - eventBufferSize = 1000 - eventDebounceTime = 1 * time.Second -) - -// flush must be called with mu locked -func 
(e *eventDebouncer) flush() { - if len(e.events) == 0 { - return - } - - // if the flush interval is faster than the callback then we will end up calling - // the callback multiple times, probably a bad idea. In this case we could drop - // frames? - go e.callback(e.events) - e.events = make([]frame, 0, eventBufferSize) -} - -func (e *eventDebouncer) debounce(frame frame) { - e.mu.Lock() - e.timer.Reset(eventDebounceTime) - - // TODO: probably need a warning to track if this threshold is too low - if len(e.events) < eventBufferSize { - e.events = append(e.events, frame) - } else { - Logger.Printf("%s: buffer full, dropping event frame: %s", e.name, frame) - } - - e.mu.Unlock() -} - -func (s *Session) handleEvent(framer *framer) { - defer framerPool.Put(framer) - - frame, err := framer.parseFrame() - if err != nil { - // TODO: logger - Logger.Printf("gocql: unable to parse event frame: %v\n", err) - return - } - - if gocqlDebug { - Logger.Printf("gocql: handling frame: %v\n", frame) - } - - switch f := frame.(type) { - case *schemaChangeKeyspace, *schemaChangeFunction, - *schemaChangeTable, *schemaChangeAggregate, *schemaChangeType: - - s.schemaEvents.debounce(frame) - case *topologyChangeEventFrame, *statusChangeEventFrame: - s.nodeEvents.debounce(frame) - default: - Logger.Printf("gocql: invalid event frame (%T): %v\n", f, f) - } -} - -func (s *Session) handleSchemaEvent(frames []frame) { - // TODO: debounce events - for _, frame := range frames { - switch f := frame.(type) { - case *schemaChangeKeyspace: - s.schemaDescriber.clearSchema(f.keyspace) - s.handleKeyspaceChange(f.keyspace, f.change) - case *schemaChangeTable: - s.schemaDescriber.clearSchema(f.keyspace) - case *schemaChangeAggregate: - s.schemaDescriber.clearSchema(f.keyspace) - case *schemaChangeFunction: - s.schemaDescriber.clearSchema(f.keyspace) - case *schemaChangeType: - s.schemaDescriber.clearSchema(f.keyspace) - } - } -} - -func (s *Session) handleKeyspaceChange(keyspace, change string) { - s.control.awaitSchemaAgreement() - s.policy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace, Change: change}) -} - -func (s *Session) handleNodeEvent(frames []frame) { - type nodeEvent struct { - change string - host net.IP - port int - } - - events := make(map[string]*nodeEvent) - - for _, frame := range frames { - // TODO: can we be sure the order of events in the buffer is correct? 
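debounce above re-arms a shared timer on every incoming frame and appends to a bounded buffer, so a burst of events yields a single flush once the stream goes quiet. A compact version of the pattern (debouncer, window, flush are illustrative names; the buffer bound is omitted for brevity):

package eventdemo

import (
	"sync"
	"time"
)

// debouncer batches items and flushes them after a window of quiet.
type debouncer struct {
	mu     sync.Mutex
	items  []string
	timer  *time.Timer
	window time.Duration
	flush  func([]string)
}

// add records an item and pushes the pending flush further out.
func (d *debouncer) add(item string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.items = append(d.items, item)
	if d.timer == nil {
		d.timer = time.AfterFunc(d.window, d.fire)
	} else {
		d.timer.Reset(d.window)
	}
}

// fire drains the batch and hands it to the callback, mirroring the
// flusher goroutine above.
func (d *debouncer) fire() {
	d.mu.Lock()
	batch := d.items
	d.items = nil
	d.mu.Unlock()
	if len(batch) > 0 {
		d.flush(batch)
	}
}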
- switch f := frame.(type) { - case *topologyChangeEventFrame: - event, ok := events[f.host.String()] - if !ok { - event = &nodeEvent{change: f.change, host: f.host, port: f.port} - events[f.host.String()] = event - } - event.change = f.change - - case *statusChangeEventFrame: - event, ok := events[f.host.String()] - if !ok { - event = &nodeEvent{change: f.change, host: f.host, port: f.port} - events[f.host.String()] = event - } - event.change = f.change - } - } - - for _, f := range events { - if gocqlDebug { - Logger.Printf("gocql: dispatching event: %+v\n", f) - } - - switch f.change { - case "NEW_NODE": - s.handleNewNode(f.host, f.port, true) - case "REMOVED_NODE": - s.handleRemovedNode(f.host, f.port) - case "MOVED_NODE": - // java-driver handles this, not mentioned in the spec - // TODO(zariel): refresh token map - case "UP": - s.handleNodeUp(f.host, f.port, true) - case "DOWN": - s.handleNodeDown(f.host, f.port) - } - } -} - -func (s *Session) addNewNode(host *HostInfo) { - if s.cfg.filterHost(host) { - return - } - - host.setState(NodeUp) - s.pool.addHost(host) - s.policy.AddHost(host) -} - -func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) { - if gocqlDebug { - Logger.Printf("gocql: Session.handleNewNode: %s:%d\n", ip.String(), port) - } - - ip, port = s.cfg.translateAddressPort(ip, port) - - // Get host info and apply any filters to the host - hostInfo, err := s.hostSource.getHostInfo(ip, port) - if err != nil { - Logger.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err) - return - } else if hostInfo == nil { - // If hostInfo is nil, this host was filtered out by cfg.HostFilter - return - } - - if t := hostInfo.Version().nodeUpDelay(); t > 0 && waitForBinary { - time.Sleep(t) - } - - // should this handle token moving? 
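handleNodeEvent above folds the whole batch into a per-host map before dispatching, so only the final state of a flapping node is acted on. The reduction on its own (nodeEvent here is a simplified stand-in for the topology and status frame types):

package coalescedemo

// nodeEvent is the reduced form of a node change notification.
type nodeEvent struct {
	host   string
	change string // e.g. "UP", "DOWN", "NEW_NODE"
}

// coalesce keeps only the latest change per host: a node that reports
// DOWN then UP inside one batch is handled once, as UP.
func coalesce(frames []nodeEvent) map[string]string {
	latest := make(map[string]string, len(frames))
	for _, f := range frames {
		latest[f.host] = f.change
	}
	return latest
}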
- hostInfo = s.ring.addOrUpdate(hostInfo) - - s.addNewNode(hostInfo) - - if s.control != nil && !s.cfg.IgnorePeerAddr { - // TODO(zariel): debounce ring refresh - s.hostSource.refreshRing() - } -} - -func (s *Session) handleRemovedNode(ip net.IP, port int) { - if gocqlDebug { - Logger.Printf("gocql: Session.handleRemovedNode: %s:%d\n", ip.String(), port) - } - - ip, port = s.cfg.translateAddressPort(ip, port) - - // we remove all nodes but only add ones which pass the filter - host := s.ring.getHost(ip) - if host == nil { - host = &HostInfo{connectAddress: ip, port: port} - } - - if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { - return - } - - host.setState(NodeDown) - s.policy.RemoveHost(host) - s.pool.removeHost(ip) - s.ring.removeHost(ip) - - if !s.cfg.IgnorePeerAddr { - s.hostSource.refreshRing() - } -} - -func (s *Session) handleNodeUp(eventIp net.IP, eventPort int, waitForBinary bool) { - if gocqlDebug { - Logger.Printf("gocql: Session.handleNodeUp: %s:%d\n", eventIp.String(), eventPort) - } - - ip, _ := s.cfg.translateAddressPort(eventIp, eventPort) - - host := s.ring.getHost(ip) - if host == nil { - // TODO(zariel): avoid the need to translate twice in this - // case - s.handleNewNode(eventIp, eventPort, waitForBinary) - return - } - - if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { - return - } - - if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary { - time.Sleep(t) - } - - s.addNewNode(host) -} - -func (s *Session) handleNodeDown(ip net.IP, port int) { - if gocqlDebug { - Logger.Printf("gocql: Session.handleNodeDown: %s:%d\n", ip.String(), port) - } - - host := s.ring.getHost(ip) - if host == nil { - host = &HostInfo{connectAddress: ip, port: port} - } - - if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { - return - } - - host.setState(NodeDown) - s.policy.HostDown(host) - s.pool.hostDown(ip) -} diff --git a/vendor/github.com/gocql/gocql/filters.go b/vendor/github.com/gocql/gocql/filters.go deleted file mode 100644 index 32e6ce66cd..0000000000 --- a/vendor/github.com/gocql/gocql/filters.go +++ /dev/null @@ -1,57 +0,0 @@ -package gocql - -import "fmt" - -// HostFilter interface is used when a host is discovered via server sent events. -type HostFilter interface { - // Called when a new host is discovered, returning true will cause the host - // to be added to the pools. - Accept(host *HostInfo) bool -} - -// HostFilterFunc converts a func(host HostInfo) bool into a HostFilter -type HostFilterFunc func(host *HostInfo) bool - -func (fn HostFilterFunc) Accept(host *HostInfo) bool { - return fn(host) -} - -// AcceptAllFilter will accept all hosts -func AcceptAllFilter() HostFilter { - return HostFilterFunc(func(host *HostInfo) bool { - return true - }) -} - -func DenyAllFilter() HostFilter { - return HostFilterFunc(func(host *HostInfo) bool { - return false - }) -} - -// DataCentreHostFilter filters all hosts such that they are in the same data centre -// as the supplied data centre. -func DataCentreHostFilter(dataCentre string) HostFilter { - return HostFilterFunc(func(host *HostInfo) bool { - return host.DataCenter() == dataCentre - }) -} - -// WhiteListHostFilter filters incoming hosts by checking that their address is -// in the initial hosts whitelist. 
-func WhiteListHostFilter(hosts ...string) HostFilter { - hostInfos, err := addrsToHosts(hosts, 9042) - if err != nil { - // dont want to panic here, but rather not break the API - panic(fmt.Errorf("unable to lookup host info from address: %v", err)) - } - - m := make(map[string]bool, len(hostInfos)) - for _, host := range hostInfos { - m[host.ConnectAddress().String()] = true - } - - return HostFilterFunc(func(host *HostInfo) bool { - return m[host.ConnectAddress().String()] - }) -} diff --git a/vendor/github.com/gocql/gocql/frame.go b/vendor/github.com/gocql/gocql/frame.go deleted file mode 100644 index 66074563ea..0000000000 --- a/vendor/github.com/gocql/gocql/frame.go +++ /dev/null @@ -1,1943 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "runtime" - "strings" - "sync" - "time" -) - -type unsetColumn struct{} - -var UnsetValue = unsetColumn{} - -type namedValue struct { - name string - value interface{} -} - -// NamedValue produce a value which will bind to the named parameter in a query -func NamedValue(name string, value interface{}) interface{} { - return &namedValue{ - name: name, - value: value, - } -} - -const ( - protoDirectionMask = 0x80 - protoVersionMask = 0x7F - protoVersion1 = 0x01 - protoVersion2 = 0x02 - protoVersion3 = 0x03 - protoVersion4 = 0x04 - protoVersion5 = 0x05 - - maxFrameSize = 256 * 1024 * 1024 -) - -type protoVersion byte - -func (p protoVersion) request() bool { - return p&protoDirectionMask == 0x00 -} - -func (p protoVersion) response() bool { - return p&protoDirectionMask == 0x80 -} - -func (p protoVersion) version() byte { - return byte(p) & protoVersionMask -} - -func (p protoVersion) String() string { - dir := "REQ" - if p.response() { - dir = "RESP" - } - - return fmt.Sprintf("[version=%d direction=%s]", p.version(), dir) -} - -type frameOp byte - -const ( - // header ops - opError frameOp = 0x00 - opStartup frameOp = 0x01 - opReady frameOp = 0x02 - opAuthenticate frameOp = 0x03 - opOptions frameOp = 0x05 - opSupported frameOp = 0x06 - opQuery frameOp = 0x07 - opResult frameOp = 0x08 - opPrepare frameOp = 0x09 - opExecute frameOp = 0x0A - opRegister frameOp = 0x0B - opEvent frameOp = 0x0C - opBatch frameOp = 0x0D - opAuthChallenge frameOp = 0x0E - opAuthResponse frameOp = 0x0F - opAuthSuccess frameOp = 0x10 -) - -func (f frameOp) String() string { - switch f { - case opError: - return "ERROR" - case opStartup: - return "STARTUP" - case opReady: - return "READY" - case opAuthenticate: - return "AUTHENTICATE" - case opOptions: - return "OPTIONS" - case opSupported: - return "SUPPORTED" - case opQuery: - return "QUERY" - case opResult: - return "RESULT" - case opPrepare: - return "PREPARE" - case opExecute: - return "EXECUTE" - case opRegister: - return "REGISTER" - case opEvent: - return "EVENT" - case opBatch: - return "BATCH" - case opAuthChallenge: - return "AUTH_CHALLENGE" - case opAuthResponse: - return "AUTH_RESPONSE" - case opAuthSuccess: - return "AUTH_SUCCESS" - default: - return fmt.Sprintf("UNKNOWN_OP_%d", f) - } -} - -const ( - // result kind - resultKindVoid = 1 - resultKindRows = 2 - resultKindKeyspace = 3 - resultKindPrepared = 4 - resultKindSchemaChanged = 5 - - // rows flags - flagGlobalTableSpec int = 0x01 - flagHasMorePages int = 0x02 - flagNoMetaData int = 0x04 - - // query flags - flagValues byte = 0x01 - flagSkipMetaData byte = 
0x02 - flagPageSize byte = 0x04 - flagWithPagingState byte = 0x08 - flagWithSerialConsistency byte = 0x10 - flagDefaultTimestamp byte = 0x20 - flagWithNameValues byte = 0x40 - - // header flags - flagCompress byte = 0x01 - flagTracing byte = 0x02 - flagCustomPayload byte = 0x04 - flagWarning byte = 0x08 -) - -type Consistency uint16 - -const ( - Any Consistency = 0x00 - One Consistency = 0x01 - Two Consistency = 0x02 - Three Consistency = 0x03 - Quorum Consistency = 0x04 - All Consistency = 0x05 - LocalQuorum Consistency = 0x06 - EachQuorum Consistency = 0x07 - LocalOne Consistency = 0x0A -) - -func (c Consistency) String() string { - switch c { - case Any: - return "ANY" - case One: - return "ONE" - case Two: - return "TWO" - case Three: - return "THREE" - case Quorum: - return "QUORUM" - case All: - return "ALL" - case LocalQuorum: - return "LOCAL_QUORUM" - case EachQuorum: - return "EACH_QUORUM" - case LocalOne: - return "LOCAL_ONE" - default: - return fmt.Sprintf("UNKNOWN_CONS_0x%x", uint16(c)) - } -} - -func (c Consistency) MarshalText() (text []byte, err error) { - return []byte(c.String()), nil -} - -func (c *Consistency) UnmarshalText(text []byte) error { - switch string(text) { - case "ANY": - *c = Any - case "ONE": - *c = One - case "TWO": - *c = Two - case "THREE": - *c = Three - case "QUORUM": - *c = Quorum - case "ALL": - *c = All - case "LOCAL_QUORUM": - *c = LocalQuorum - case "EACH_QUORUM": - *c = EachQuorum - case "LOCAL_ONE": - *c = LocalOne - default: - return fmt.Errorf("invalid consistency %q", string(text)) - } - - return nil -} - -func ParseConsistency(s string) Consistency { - var c Consistency - if err := c.UnmarshalText([]byte(strings.ToUpper(s))); err != nil { - panic(err) - } - return c -} - -// ParseConsistencyWrapper wraps gocql.ParseConsistency to provide an err -// return instead of a panic -func ParseConsistencyWrapper(s string) (consistency Consistency, err error) { - err = consistency.UnmarshalText([]byte(strings.ToUpper(s))) - return -} - -// MustParseConsistency is the same as ParseConsistency except it returns -// an error (never). It is kept here since breaking changes are not good. -// DEPRECATED: use ParseConsistency if you want a panic on parse error. -func MustParseConsistency(s string) (Consistency, error) { - c, err := ParseConsistencyWrapper(s) - if err != nil { - panic(err) - } - return c, nil -} - -type SerialConsistency uint16 - -const ( - Serial SerialConsistency = 0x08 - LocalSerial SerialConsistency = 0x09 -) - -func (s SerialConsistency) String() string { - switch s { - case Serial: - return "SERIAL" - case LocalSerial: - return "LOCAL_SERIAL" - default: - return fmt.Sprintf("UNKNOWN_SERIAL_CONS_0x%x", uint16(s)) - } -} - -func (s SerialConsistency) MarshalText() (text []byte, err error) { - return []byte(s.String()), nil -} - -func (s *SerialConsistency) UnmarshalText(text []byte) error { - switch string(text) { - case "SERIAL": - *s = Serial - case "LOCAL_SERIAL": - *s = LocalSerial - default: - return fmt.Errorf("invalid consistency %q", string(text)) - } - - return nil -} - -const ( - apacheCassandraTypePrefix = "org.apache.cassandra.db.marshal." 
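An aside on the consistency helpers being deleted above: ParseConsistency panics on bad input, while ParseConsistencyWrapper upper-cases the string and returns UnmarshalText's error instead. A minimal sketch of the round-trip, assuming the gocql package is importable:

package main

import (
	"fmt"

	"github.com/gocql/gocql"
)

func main() {
	// Case-insensitive: the wrapper upper-cases before UnmarshalText.
	c, err := gocql.ParseConsistencyWrapper("quorum")
	if err != nil {
		panic(err)
	}
	fmt.Println(c) // QUORUM

	// MarshalText round-trips through String().
	text, _ := c.MarshalText()
	fmt.Println(string(text)) // QUORUM
}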
-) - -var ( - ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed") -) - -const maxFrameHeaderSize = 9 - -func writeInt(p []byte, n int32) { - p[0] = byte(n >> 24) - p[1] = byte(n >> 16) - p[2] = byte(n >> 8) - p[3] = byte(n) -} - -func readInt(p []byte) int32 { - return int32(p[0])<<24 | int32(p[1])<<16 | int32(p[2])<<8 | int32(p[3]) -} - -func writeShort(p []byte, n uint16) { - p[0] = byte(n >> 8) - p[1] = byte(n) -} - -func readShort(p []byte) uint16 { - return uint16(p[0])<<8 | uint16(p[1]) -} - -type frameHeader struct { - version protoVersion - flags byte - stream int - op frameOp - length int - customPayload map[string][]byte - warnings []string -} - -func (f frameHeader) String() string { - return fmt.Sprintf("[header version=%s flags=0x%x stream=%d op=%s length=%d]", f.version, f.flags, f.stream, f.op, f.length) -} - -func (f frameHeader) Header() frameHeader { - return f -} - -const defaultBufSize = 128 - -var framerPool = sync.Pool{ - New: func() interface{} { - return &framer{ - wbuf: make([]byte, defaultBufSize), - readBuffer: make([]byte, defaultBufSize), - } - }, -} - -// a framer is responsible for reading, writing and parsing frames on a single stream -type framer struct { - r io.Reader - w io.Writer - - proto byte - // flags are for outgoing flags, enabling compression and tracing etc - flags byte - compres Compressor - headSize int - // if this frame was read then the header will be here - header *frameHeader - - // if tracing flag is set this is not nil - traceID []byte - - // holds a ref to the whole byte slice for rbuf so that it can be reset to - // 0 after a read. - readBuffer []byte - - rbuf []byte - wbuf []byte -} - -func newFramer(r io.Reader, w io.Writer, compressor Compressor, version byte) *framer { - f := framerPool.Get().(*framer) - var flags byte - if compressor != nil { - flags |= flagCompress - } - - version &= protoVersionMask - - headSize := 8 - if version > protoVersion2 { - headSize = 9 - } - - f.compres = compressor - f.proto = version - f.flags = flags - f.headSize = headSize - - f.r = r - f.rbuf = f.readBuffer[:0] - - f.w = w - f.wbuf = f.wbuf[:0] - - f.header = nil - f.traceID = nil - - return f -} - -type frame interface { - Header() frameHeader -} - -func readHeader(r io.Reader, p []byte) (head frameHeader, err error) { - _, err = io.ReadFull(r, p[:1]) - if err != nil { - return frameHeader{}, err - } - - version := p[0] & protoVersionMask - - if version < protoVersion1 || version > protoVersion4 { - return frameHeader{}, fmt.Errorf("gocql: unsupported protocol response version: %d", version) - } - - headSize := 9 - if version < protoVersion3 { - headSize = 8 - } - - _, err = io.ReadFull(r, p[1:headSize]) - if err != nil { - return frameHeader{}, err - } - - p = p[:headSize] - - head.version = protoVersion(p[0]) - head.flags = p[1] - - if version > protoVersion2 { - if len(p) != 9 { - return frameHeader{}, fmt.Errorf("not enough bytes to read header require 9 got: %d", len(p)) - } - - head.stream = int(int16(p[2])<<8 | int16(p[3])) - head.op = frameOp(p[4]) - head.length = int(readInt(p[5:])) - } else { - if len(p) != 8 { - return frameHeader{}, fmt.Errorf("not enough bytes to read header require 8 got: %d", len(p)) - } - - head.stream = int(int8(p[2])) - head.op = frameOp(p[3]) - head.length = int(readInt(p[4:])) - } - - return head, nil -} - -// explicitly enables tracing for the framers outgoing requests -func (f *framer) trace() { - f.flags |= flagTracing -} - -// reads a frame form the wire into the framers buffer 
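To make the header layout parsed by readHeader above concrete: a v3+/v4 header is 9 bytes: version (top bit is the response-direction flag), flags, a 2-byte big-endian stream id, the opcode, and a 4-byte body length. A self-contained sketch of the same decoding on made-up sample bytes, not using gocql internals:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 0x84 = response bit (0x80) | protocol version 4; opcode 0x08 = RESULT.
	hdr := []byte{0x84, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x2a}

	version := hdr[0] & 0x7f     // protoVersionMask
	response := hdr[0]&0x80 != 0 // protoDirectionMask
	stream := int16(binary.BigEndian.Uint16(hdr[2:4]))
	op := hdr[4]
	length := int32(binary.BigEndian.Uint32(hdr[5:9]))

	fmt.Printf("v%d response=%v stream=%d op=0x%02x length=%d\n",
		version, response, stream, op, length)
	// v4 response=true stream=1 op=0x08 length=42
}

The readFrame method that follows then reads exactly that many body bytes, discarding oversized bodies and returning ErrFrameTooBig past maxFrameSize.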
-func (f *framer) readFrame(head *frameHeader) error { - if head.length < 0 { - return fmt.Errorf("frame body length can not be less than 0: %d", head.length) - } else if head.length > maxFrameSize { - // need to free up the connection to be used again - _, err := io.CopyN(ioutil.Discard, f.r, int64(head.length)) - if err != nil { - return fmt.Errorf("error whilst trying to discard frame with invalid length: %v", err) - } - return ErrFrameTooBig - } - - if cap(f.readBuffer) >= head.length { - f.rbuf = f.readBuffer[:head.length] - } else { - f.readBuffer = make([]byte, head.length) - f.rbuf = f.readBuffer - } - - // assume the underlying reader takes care of timeouts and retries - n, err := io.ReadFull(f.r, f.rbuf) - if err != nil { - return fmt.Errorf("unable to read frame body: read %d/%d bytes: %v", n, head.length, err) - } - - if head.flags&flagCompress == flagCompress { - if f.compres == nil { - return NewErrProtocol("no compressor available with compressed frame body") - } - - f.rbuf, err = f.compres.Decode(f.rbuf) - if err != nil { - return err - } - } - - f.header = head - return nil -} - -func (f *framer) parseFrame() (frame frame, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - if f.header.version.request() { - return nil, NewErrProtocol("got a request frame from server: %v", f.header.version) - } - - if f.header.flags&flagTracing == flagTracing { - f.readTrace() - } - - if f.header.flags&flagWarning == flagWarning { - f.header.warnings = f.readStringList() - } - - if f.header.flags&flagCustomPayload == flagCustomPayload { - f.header.customPayload = f.readBytesMap() - } - - // assumes that the frame body has been read into rbuf - switch f.header.op { - case opError: - frame = f.parseErrorFrame() - case opReady: - frame = f.parseReadyFrame() - case opResult: - frame, err = f.parseResultFrame() - case opSupported: - frame = f.parseSupportedFrame() - case opAuthenticate: - frame = f.parseAuthenticateFrame() - case opAuthChallenge: - frame = f.parseAuthChallengeFrame() - case opAuthSuccess: - frame = f.parseAuthSuccessFrame() - case opEvent: - frame = f.parseEventFrame() - default: - return nil, NewErrProtocol("unknown op in frame header: %s", f.header.op) - } - - return -} - -func (f *framer) parseErrorFrame() frame { - code := f.readInt() - msg := f.readString() - - errD := errorFrame{ - frameHeader: *f.header, - code: code, - message: msg, - } - - switch code { - case errUnavailable: - cl := f.readConsistency() - required := f.readInt() - alive := f.readInt() - return &RequestErrUnavailable{ - errorFrame: errD, - Consistency: cl, - Required: required, - Alive: alive, - } - case errWriteTimeout: - cl := f.readConsistency() - received := f.readInt() - blockfor := f.readInt() - writeType := f.readString() - return &RequestErrWriteTimeout{ - errorFrame: errD, - Consistency: cl, - Received: received, - BlockFor: blockfor, - WriteType: writeType, - } - case errReadTimeout: - cl := f.readConsistency() - received := f.readInt() - blockfor := f.readInt() - dataPresent := f.readByte() - return &RequestErrReadTimeout{ - errorFrame: errD, - Consistency: cl, - Received: received, - BlockFor: blockfor, - DataPresent: dataPresent, - } - case errAlreadyExists: - ks := f.readString() - table := f.readString() - return &RequestErrAlreadyExists{ - errorFrame: errD, - Keyspace: ks, - Table: table, - } - case errUnprepared: - stmtId := f.readShortBytes() - return &RequestErrUnprepared{ - errorFrame: errD, 
- StatementId: copyBytes(stmtId), // defensively copy - } - case errReadFailure: - res := &RequestErrReadFailure{ - errorFrame: errD, - } - res.Consistency = f.readConsistency() - res.Received = f.readInt() - res.BlockFor = f.readInt() - res.DataPresent = f.readByte() != 0 - return res - case errWriteFailure: - res := &RequestErrWriteFailure{ - errorFrame: errD, - } - res.Consistency = f.readConsistency() - res.Received = f.readInt() - res.BlockFor = f.readInt() - res.NumFailures = f.readInt() - res.WriteType = f.readString() - return res - case errFunctionFailure: - res := RequestErrFunctionFailure{ - errorFrame: errD, - } - res.Keyspace = f.readString() - res.Function = f.readString() - res.ArgTypes = f.readStringList() - return res - case errInvalid, errBootstrapping, errConfig, errCredentials, errOverloaded, - errProtocol, errServer, errSyntax, errTruncate, errUnauthorized: - // TODO(zariel): we should have some distinct types for these errors - return errD - default: - panic(fmt.Errorf("unknown error code: 0x%x", errD.code)) - } -} - -func (f *framer) writeHeader(flags byte, op frameOp, stream int) { - f.wbuf = f.wbuf[:0] - f.wbuf = append(f.wbuf, - f.proto, - flags, - ) - - if f.proto > protoVersion2 { - f.wbuf = append(f.wbuf, - byte(stream>>8), - byte(stream), - ) - } else { - f.wbuf = append(f.wbuf, - byte(stream), - ) - } - - // pad out length - f.wbuf = append(f.wbuf, - byte(op), - 0, - 0, - 0, - 0, - ) -} - -func (f *framer) setLength(length int) { - p := 4 - if f.proto > protoVersion2 { - p = 5 - } - - f.wbuf[p+0] = byte(length >> 24) - f.wbuf[p+1] = byte(length >> 16) - f.wbuf[p+2] = byte(length >> 8) - f.wbuf[p+3] = byte(length) -} - -func (f *framer) finishWrite() error { - if len(f.wbuf) > maxFrameSize { - // huge app frame, lets remove it so it doesn't bloat the heap - f.wbuf = make([]byte, defaultBufSize) - return ErrFrameTooBig - } - - if f.wbuf[1]&flagCompress == flagCompress { - if f.compres == nil { - panic("compress flag set with no compressor") - } - - // TODO: only compress frames which are big enough - compressed, err := f.compres.Encode(f.wbuf[f.headSize:]) - if err != nil { - return err - } - - f.wbuf = append(f.wbuf[:f.headSize], compressed...) - } - length := len(f.wbuf) - f.headSize - f.setLength(length) - - _, err := f.w.Write(f.wbuf) - if err != nil { - return err - } - - return nil -} - -func (f *framer) readTrace() { - f.traceID = f.readUUID().Bytes() -} - -type readyFrame struct { - frameHeader -} - -func (f *framer) parseReadyFrame() frame { - return &readyFrame{ - frameHeader: *f.header, - } -} - -type supportedFrame struct { - frameHeader - - supported map[string][]string -} - -// TODO: if we move the body buffer onto the frameHeader then we only need a single -// framer, and can move the methods onto the header. 
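A note on writeHeader and setLength above: the length field is appended as four zero bytes when the header is written and back-filled by setLength once the body size is known, so its offset follows from the layout: version(1) + flags(1) + stream(1 byte on v1/v2, 2 on v3+) + opcode(1). A tiny sketch of just that arithmetic, not gocql API:

package main

import "fmt"

// lengthOffset mirrors the p := 4 / p = 5 choice inside setLength.
func lengthOffset(proto byte) int {
	if proto > 2 { // protoVersion2
		return 5
	}
	return 4
}

func main() {
	fmt.Println(lengthOffset(2), lengthOffset(4)) // 4 5
}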
-func (f *framer) parseSupportedFrame() frame { - return &supportedFrame{ - frameHeader: *f.header, - - supported: f.readStringMultiMap(), - } -} - -type writeStartupFrame struct { - opts map[string]string -} - -func (w writeStartupFrame) String() string { - return fmt.Sprintf("[startup opts=%+v]", w.opts) -} - -func (w *writeStartupFrame) writeFrame(framer *framer, streamID int) error { - return framer.writeStartupFrame(streamID, w.opts) -} - -func (f *framer) writeStartupFrame(streamID int, options map[string]string) error { - f.writeHeader(f.flags&^flagCompress, opStartup, streamID) - f.writeStringMap(options) - - return f.finishWrite() -} - -type writePrepareFrame struct { - statement string -} - -func (w *writePrepareFrame) writeFrame(framer *framer, streamID int) error { - return framer.writePrepareFrame(streamID, w.statement) -} - -func (f *framer) writePrepareFrame(stream int, statement string) error { - f.writeHeader(f.flags, opPrepare, stream) - f.writeLongString(statement) - return f.finishWrite() -} - -func (f *framer) readTypeInfo() TypeInfo { - // TODO: factor this out so the same code paths can be used to parse custom - // types and other types, as much of the logic will be duplicated. - id := f.readShort() - - simple := NativeType{ - proto: f.proto, - typ: Type(id), - } - - if simple.typ == TypeCustom { - simple.custom = f.readString() - if cassType := getApacheCassandraType(simple.custom); cassType != TypeCustom { - simple.typ = cassType - } - } - - switch simple.typ { - case TypeTuple: - n := f.readShort() - tuple := TupleTypeInfo{ - NativeType: simple, - Elems: make([]TypeInfo, n), - } - - for i := 0; i < int(n); i++ { - tuple.Elems[i] = f.readTypeInfo() - } - - return tuple - - case TypeUDT: - udt := UDTTypeInfo{ - NativeType: simple, - } - udt.KeySpace = f.readString() - udt.Name = f.readString() - - n := f.readShort() - udt.Elements = make([]UDTField, n) - for i := 0; i < int(n); i++ { - field := &udt.Elements[i] - field.Name = f.readString() - field.Type = f.readTypeInfo() - } - - return udt - case TypeMap, TypeList, TypeSet: - collection := CollectionType{ - NativeType: simple, - } - - if simple.typ == TypeMap { - collection.Key = f.readTypeInfo() - } - - collection.Elem = f.readTypeInfo() - - return collection - } - - return simple -} - -type preparedMetadata struct { - resultMetadata - - // proto v4+ - pkeyColumns []int -} - -func (r preparedMetadata) String() string { - return fmt.Sprintf("[prepared flags=0x%x pkey=%v paging_state=% X columns=%v col_count=%d actual_col_count=%d]", r.flags, r.pkeyColumns, r.pagingState, r.columns, r.colCount, r.actualColCount) -} - -func (f *framer) parsePreparedMetadata() preparedMetadata { - // TODO: deduplicate this from parseMetadata - meta := preparedMetadata{} - - meta.flags = f.readInt() - meta.colCount = f.readInt() - if meta.colCount < 0 { - panic(fmt.Errorf("received negative column count: %d", meta.colCount)) - } - meta.actualColCount = meta.colCount - - if f.proto >= protoVersion4 { - pkeyCount := f.readInt() - pkeys := make([]int, pkeyCount) - for i := 0; i < pkeyCount; i++ { - pkeys[i] = int(f.readShort()) - } - meta.pkeyColumns = pkeys - } - - if meta.flags&flagHasMorePages == flagHasMorePages { - meta.pagingState = copyBytes(f.readBytes()) - } - - if meta.flags&flagNoMetaData == flagNoMetaData { - return meta - } - - var keyspace, table string - globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec - if globalSpec { - keyspace = f.readString() - table = f.readString() - } - - var cols []ColumnInfo - 
if meta.colCount < 1000 { - // preallocate columninfo to avoid excess copying - cols = make([]ColumnInfo, meta.colCount) - for i := 0; i < meta.colCount; i++ { - f.readCol(&cols[i], &meta.resultMetadata, globalSpec, keyspace, table) - } - } else { - // use append, huge number of columns usually indicates a corrupt frame or - // just a huge row. - for i := 0; i < meta.colCount; i++ { - var col ColumnInfo - f.readCol(&col, &meta.resultMetadata, globalSpec, keyspace, table) - cols = append(cols, col) - } - } - - meta.columns = cols - - return meta -} - -type resultMetadata struct { - flags int - - // only if flagPageState - pagingState []byte - - columns []ColumnInfo - colCount int - - // this is a count of the total number of columns which can be scanned, - // it is at minimum len(columns) but may be larger, for instance when a column - // is a UDT or tuple. - actualColCount int -} - -func (r resultMetadata) String() string { - return fmt.Sprintf("[metadata flags=0x%x paging_state=% X columns=%v]", r.flags, r.pagingState, r.columns) -} - -func (f *framer) readCol(col *ColumnInfo, meta *resultMetadata, globalSpec bool, keyspace, table string) { - if !globalSpec { - col.Keyspace = f.readString() - col.Table = f.readString() - } else { - col.Keyspace = keyspace - col.Table = table - } - - col.Name = f.readString() - col.TypeInfo = f.readTypeInfo() - switch v := col.TypeInfo.(type) { - // maybe also UDT - case TupleTypeInfo: - // -1 because we already included the tuple column - meta.actualColCount += len(v.Elems) - 1 - } -} - -func (f *framer) parseResultMetadata() resultMetadata { - var meta resultMetadata - - meta.flags = f.readInt() - meta.colCount = f.readInt() - if meta.colCount < 0 { - panic(fmt.Errorf("received negative column count: %d", meta.colCount)) - } - meta.actualColCount = meta.colCount - - if meta.flags&flagHasMorePages == flagHasMorePages { - meta.pagingState = copyBytes(f.readBytes()) - } - - if meta.flags&flagNoMetaData == flagNoMetaData { - return meta - } - - var keyspace, table string - globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec - if globalSpec { - keyspace = f.readString() - table = f.readString() - } - - var cols []ColumnInfo - if meta.colCount < 1000 { - // preallocate columninfo to avoid excess copying - cols = make([]ColumnInfo, meta.colCount) - for i := 0; i < meta.colCount; i++ { - f.readCol(&cols[i], &meta, globalSpec, keyspace, table) - } - - } else { - // use append, huge number of columns usually indicates a corrupt frame or - // just a huge row. 
- for i := 0; i < meta.colCount; i++ { - var col ColumnInfo - f.readCol(&col, &meta, globalSpec, keyspace, table) - cols = append(cols, col) - } - } - - meta.columns = cols - - return meta -} - -type resultVoidFrame struct { - frameHeader -} - -func (f *resultVoidFrame) String() string { - return "[result_void]" -} - -func (f *framer) parseResultFrame() (frame, error) { - kind := f.readInt() - - switch kind { - case resultKindVoid: - return &resultVoidFrame{frameHeader: *f.header}, nil - case resultKindRows: - return f.parseResultRows(), nil - case resultKindKeyspace: - return f.parseResultSetKeyspace(), nil - case resultKindPrepared: - return f.parseResultPrepared(), nil - case resultKindSchemaChanged: - return f.parseResultSchemaChange(), nil - } - - return nil, NewErrProtocol("unknown result kind: %x", kind) -} - -type resultRowsFrame struct { - frameHeader - - meta resultMetadata - // dont parse the rows here as we only need to do it once - numRows int -} - -func (f *resultRowsFrame) String() string { - return fmt.Sprintf("[result_rows meta=%v]", f.meta) -} - -func (f *framer) parseResultRows() frame { - result := &resultRowsFrame{} - result.meta = f.parseResultMetadata() - - result.numRows = f.readInt() - if result.numRows < 0 { - panic(fmt.Errorf("invalid row_count in result frame: %d", result.numRows)) - } - - return result -} - -type resultKeyspaceFrame struct { - frameHeader - keyspace string -} - -func (r *resultKeyspaceFrame) String() string { - return fmt.Sprintf("[result_keyspace keyspace=%s]", r.keyspace) -} - -func (f *framer) parseResultSetKeyspace() frame { - return &resultKeyspaceFrame{ - frameHeader: *f.header, - keyspace: f.readString(), - } -} - -type resultPreparedFrame struct { - frameHeader - - preparedID []byte - reqMeta preparedMetadata - respMeta resultMetadata -} - -func (f *framer) parseResultPrepared() frame { - frame := &resultPreparedFrame{ - frameHeader: *f.header, - preparedID: f.readShortBytes(), - reqMeta: f.parsePreparedMetadata(), - } - - if f.proto < protoVersion2 { - return frame - } - - frame.respMeta = f.parseResultMetadata() - - return frame -} - -type schemaChangeKeyspace struct { - frameHeader - - change string - keyspace string -} - -func (f schemaChangeKeyspace) String() string { - return fmt.Sprintf("[event schema_change_keyspace change=%q keyspace=%q]", f.change, f.keyspace) -} - -type schemaChangeTable struct { - frameHeader - - change string - keyspace string - object string -} - -func (f schemaChangeTable) String() string { - return fmt.Sprintf("[event schema_change change=%q keyspace=%q object=%q]", f.change, f.keyspace, f.object) -} - -type schemaChangeType struct { - frameHeader - - change string - keyspace string - object string -} - -type schemaChangeFunction struct { - frameHeader - - change string - keyspace string - name string - args []string -} - -type schemaChangeAggregate struct { - frameHeader - - change string - keyspace string - name string - args []string -} - -func (f *framer) parseResultSchemaChange() frame { - if f.proto <= protoVersion2 { - change := f.readString() - keyspace := f.readString() - table := f.readString() - - if table != "" { - return &schemaChangeTable{ - frameHeader: *f.header, - change: change, - keyspace: keyspace, - object: table, - } - } else { - return &schemaChangeKeyspace{ - frameHeader: *f.header, - change: change, - keyspace: keyspace, - } - } - } else { - change := f.readString() - target := f.readString() - - // TODO: could just use a separate type for each target - switch target { - case 
"KEYSPACE": - frame := &schemaChangeKeyspace{ - frameHeader: *f.header, - change: change, - } - - frame.keyspace = f.readString() - - return frame - case "TABLE": - frame := &schemaChangeTable{ - frameHeader: *f.header, - change: change, - } - - frame.keyspace = f.readString() - frame.object = f.readString() - - return frame - case "TYPE": - frame := &schemaChangeType{ - frameHeader: *f.header, - change: change, - } - - frame.keyspace = f.readString() - frame.object = f.readString() - - return frame - case "FUNCTION": - frame := &schemaChangeFunction{ - frameHeader: *f.header, - change: change, - } - - frame.keyspace = f.readString() - frame.name = f.readString() - frame.args = f.readStringList() - - return frame - case "AGGREGATE": - frame := &schemaChangeAggregate{ - frameHeader: *f.header, - change: change, - } - - frame.keyspace = f.readString() - frame.name = f.readString() - frame.args = f.readStringList() - - return frame - default: - panic(fmt.Errorf("gocql: unknown SCHEMA_CHANGE target: %q change: %q", target, change)) - } - } - -} - -type authenticateFrame struct { - frameHeader - - class string -} - -func (a *authenticateFrame) String() string { - return fmt.Sprintf("[authenticate class=%q]", a.class) -} - -func (f *framer) parseAuthenticateFrame() frame { - return &authenticateFrame{ - frameHeader: *f.header, - class: f.readString(), - } -} - -type authSuccessFrame struct { - frameHeader - - data []byte -} - -func (a *authSuccessFrame) String() string { - return fmt.Sprintf("[auth_success data=%q]", a.data) -} - -func (f *framer) parseAuthSuccessFrame() frame { - return &authSuccessFrame{ - frameHeader: *f.header, - data: f.readBytes(), - } -} - -type authChallengeFrame struct { - frameHeader - - data []byte -} - -func (a *authChallengeFrame) String() string { - return fmt.Sprintf("[auth_challenge data=%q]", a.data) -} - -func (f *framer) parseAuthChallengeFrame() frame { - return &authChallengeFrame{ - frameHeader: *f.header, - data: f.readBytes(), - } -} - -type statusChangeEventFrame struct { - frameHeader - - change string - host net.IP - port int -} - -func (t statusChangeEventFrame) String() string { - return fmt.Sprintf("[status_change change=%s host=%v port=%v]", t.change, t.host, t.port) -} - -// essentially the same as statusChange -type topologyChangeEventFrame struct { - frameHeader - - change string - host net.IP - port int -} - -func (t topologyChangeEventFrame) String() string { - return fmt.Sprintf("[topology_change change=%s host=%v port=%v]", t.change, t.host, t.port) -} - -func (f *framer) parseEventFrame() frame { - eventType := f.readString() - - switch eventType { - case "TOPOLOGY_CHANGE": - frame := &topologyChangeEventFrame{frameHeader: *f.header} - frame.change = f.readString() - frame.host, frame.port = f.readInet() - - return frame - case "STATUS_CHANGE": - frame := &statusChangeEventFrame{frameHeader: *f.header} - frame.change = f.readString() - frame.host, frame.port = f.readInet() - - return frame - case "SCHEMA_CHANGE": - // this should work for all versions - return f.parseResultSchemaChange() - default: - panic(fmt.Errorf("gocql: unknown event type: %q", eventType)) - } - -} - -type writeAuthResponseFrame struct { - data []byte -} - -func (a *writeAuthResponseFrame) String() string { - return fmt.Sprintf("[auth_response data=%q]", a.data) -} - -func (a *writeAuthResponseFrame) writeFrame(framer *framer, streamID int) error { - return framer.writeAuthResponseFrame(streamID, a.data) -} - -func (f *framer) writeAuthResponseFrame(streamID int, data 
[]byte) error { - f.writeHeader(f.flags, opAuthResponse, streamID) - f.writeBytes(data) - return f.finishWrite() -} - -type queryValues struct { - value []byte - - // optional name, will set With names for values flag - name string - isUnset bool -} - -type queryParams struct { - consistency Consistency - // v2+ - skipMeta bool - values []queryValues - pageSize int - pagingState []byte - serialConsistency SerialConsistency - // v3+ - defaultTimestamp bool - defaultTimestampValue int64 -} - -func (q queryParams) String() string { - return fmt.Sprintf("[query_params consistency=%v skip_meta=%v page_size=%d paging_state=%q serial_consistency=%v default_timestamp=%v values=%v]", - q.consistency, q.skipMeta, q.pageSize, q.pagingState, q.serialConsistency, q.defaultTimestamp, q.values) -} - -func (f *framer) writeQueryParams(opts *queryParams) { - f.writeConsistency(opts.consistency) - - if f.proto == protoVersion1 { - return - } - - var flags byte - if len(opts.values) > 0 { - flags |= flagValues - } - if opts.skipMeta { - flags |= flagSkipMetaData - } - if opts.pageSize > 0 { - flags |= flagPageSize - } - if len(opts.pagingState) > 0 { - flags |= flagWithPagingState - } - if opts.serialConsistency > 0 { - flags |= flagWithSerialConsistency - } - - names := false - - // protoV3 specific things - if f.proto > protoVersion2 { - if opts.defaultTimestamp { - flags |= flagDefaultTimestamp - } - - if len(opts.values) > 0 && opts.values[0].name != "" { - flags |= flagWithNameValues - names = true - } - } - - f.writeByte(flags) - - if n := len(opts.values); n > 0 { - f.writeShort(uint16(n)) - - for i := 0; i < n; i++ { - if names { - f.writeString(opts.values[i].name) - } - if opts.values[i].isUnset { - f.writeUnset() - } else { - f.writeBytes(opts.values[i].value) - } - } - } - - if opts.pageSize > 0 { - f.writeInt(int32(opts.pageSize)) - } - - if len(opts.pagingState) > 0 { - f.writeBytes(opts.pagingState) - } - - if opts.serialConsistency > 0 { - f.writeConsistency(Consistency(opts.serialConsistency)) - } - - if f.proto > protoVersion2 && opts.defaultTimestamp { - // timestamp in microseconds - var ts int64 - if opts.defaultTimestampValue != 0 { - ts = opts.defaultTimestampValue - } else { - ts = time.Now().UnixNano() / 1000 - } - f.writeLong(ts) - } -} - -type writeQueryFrame struct { - statement string - params queryParams -} - -func (w *writeQueryFrame) String() string { - return fmt.Sprintf("[query statement=%q params=%v]", w.statement, w.params) -} - -func (w *writeQueryFrame) writeFrame(framer *framer, streamID int) error { - return framer.writeQueryFrame(streamID, w.statement, &w.params) -} - -func (f *framer) writeQueryFrame(streamID int, statement string, params *queryParams) error { - f.writeHeader(f.flags, opQuery, streamID) - f.writeLongString(statement) - f.writeQueryParams(params) - - return f.finishWrite() -} - -type frameWriter interface { - writeFrame(framer *framer, streamID int) error -} - -type frameWriterFunc func(framer *framer, streamID int) error - -func (f frameWriterFunc) writeFrame(framer *framer, streamID int) error { - return f(framer, streamID) -} - -type writeExecuteFrame struct { - preparedID []byte - params queryParams -} - -func (e *writeExecuteFrame) String() string { - return fmt.Sprintf("[execute id=% X params=%v]", e.preparedID, &e.params) -} - -func (e *writeExecuteFrame) writeFrame(fr *framer, streamID int) error { - return fr.writeExecuteFrame(streamID, e.preparedID, &e.params) -} - -func (f *framer) writeExecuteFrame(streamID int, preparedID []byte, params 
*queryParams) error { - f.writeHeader(f.flags, opExecute, streamID) - f.writeShortBytes(preparedID) - if f.proto > protoVersion1 { - f.writeQueryParams(params) - } else { - n := len(params.values) - f.writeShort(uint16(n)) - for i := 0; i < n; i++ { - if params.values[i].isUnset { - f.writeUnset() - } else { - f.writeBytes(params.values[i].value) - } - } - f.writeConsistency(params.consistency) - } - - return f.finishWrite() -} - -// TODO: can we replace BatchStatemt with batchStatement? As they prety much -// duplicate each other -type batchStatment struct { - preparedID []byte - statement string - values []queryValues -} - -type writeBatchFrame struct { - typ BatchType - statements []batchStatment - consistency Consistency - - // v3+ - serialConsistency SerialConsistency - defaultTimestamp bool - defaultTimestampValue int64 -} - -func (w *writeBatchFrame) writeFrame(framer *framer, streamID int) error { - return framer.writeBatchFrame(streamID, w) -} - -func (f *framer) writeBatchFrame(streamID int, w *writeBatchFrame) error { - f.writeHeader(f.flags, opBatch, streamID) - f.writeByte(byte(w.typ)) - - n := len(w.statements) - f.writeShort(uint16(n)) - - var flags byte - - for i := 0; i < n; i++ { - b := &w.statements[i] - if len(b.preparedID) == 0 { - f.writeByte(0) - f.writeLongString(b.statement) - } else { - f.writeByte(1) - f.writeShortBytes(b.preparedID) - } - - f.writeShort(uint16(len(b.values))) - for j := range b.values { - col := b.values[j] - if f.proto > protoVersion2 && col.name != "" { - // TODO: move this check into the caller and set a flag on writeBatchFrame - // to indicate using named values - if f.proto <= protoVersion5 { - return fmt.Errorf("gocql: named query values are not supported in batches, please see https://issues.apache.org/jira/browse/CASSANDRA-10246") - } - flags |= flagWithNameValues - f.writeString(col.name) - } - if col.isUnset { - f.writeUnset() - } else { - f.writeBytes(col.value) - } - } - } - - f.writeConsistency(w.consistency) - - if f.proto > protoVersion2 { - if w.serialConsistency > 0 { - flags |= flagWithSerialConsistency - } - if w.defaultTimestamp { - flags |= flagDefaultTimestamp - } - - f.writeByte(flags) - - if w.serialConsistency > 0 { - f.writeConsistency(Consistency(w.serialConsistency)) - } - - if w.defaultTimestamp { - var ts int64 - if w.defaultTimestampValue != 0 { - ts = w.defaultTimestampValue - } else { - ts = time.Now().UnixNano() / 1000 - } - f.writeLong(ts) - } - } - - return f.finishWrite() -} - -type writeOptionsFrame struct{} - -func (w *writeOptionsFrame) writeFrame(framer *framer, streamID int) error { - return framer.writeOptionsFrame(streamID, w) -} - -func (f *framer) writeOptionsFrame(stream int, _ *writeOptionsFrame) error { - f.writeHeader(f.flags, opOptions, stream) - return f.finishWrite() -} - -type writeRegisterFrame struct { - events []string -} - -func (w *writeRegisterFrame) writeFrame(framer *framer, streamID int) error { - return framer.writeRegisterFrame(streamID, w) -} - -func (f *framer) writeRegisterFrame(streamID int, w *writeRegisterFrame) error { - f.writeHeader(f.flags, opRegister, streamID) - f.writeStringList(w.events) - - return f.finishWrite() -} - -func (f *framer) readByte() byte { - if len(f.rbuf) < 1 { - panic(fmt.Errorf("not enough bytes in buffer to read byte require 1 got: %d", len(f.rbuf))) - } - - b := f.rbuf[0] - f.rbuf = f.rbuf[1:] - return b -} - -func (f *framer) readInt() (n int) { - if len(f.rbuf) < 4 { - panic(fmt.Errorf("not enough bytes in buffer to read int require 4 got: %d", 
len(f.rbuf)))
-	}
-
-	n = int(int32(f.rbuf[0])<<24 | int32(f.rbuf[1])<<16 | int32(f.rbuf[2])<<8 | int32(f.rbuf[3]))
-	f.rbuf = f.rbuf[4:]
-	return
-}
-
-func (f *framer) readShort() (n uint16) {
-	if len(f.rbuf) < 2 {
-		panic(fmt.Errorf("not enough bytes in buffer to read short require 2 got: %d", len(f.rbuf)))
-	}
-	n = uint16(f.rbuf[0])<<8 | uint16(f.rbuf[1])
-	f.rbuf = f.rbuf[2:]
-	return
-}
-
-func (f *framer) readLong() (n int64) {
-	if len(f.rbuf) < 8 {
-		panic(fmt.Errorf("not enough bytes in buffer to read long require 8 got: %d", len(f.rbuf)))
-	}
-	n = int64(f.rbuf[0])<<56 | int64(f.rbuf[1])<<48 | int64(f.rbuf[2])<<40 | int64(f.rbuf[3])<<32 |
-		int64(f.rbuf[4])<<24 | int64(f.rbuf[5])<<16 | int64(f.rbuf[6])<<8 | int64(f.rbuf[7])
-	f.rbuf = f.rbuf[8:]
-	return
-}
-
-func (f *framer) readString() (s string) {
-	size := f.readShort()
-
-	if len(f.rbuf) < int(size) {
-		panic(fmt.Errorf("not enough bytes in buffer to read string require %d got: %d", size, len(f.rbuf)))
-	}
-
-	s = string(f.rbuf[:size])
-	f.rbuf = f.rbuf[size:]
-	return
-}
-
-func (f *framer) readLongString() (s string) {
-	size := f.readInt()
-
-	if len(f.rbuf) < size {
-		panic(fmt.Errorf("not enough bytes in buffer to read long string require %d got: %d", size, len(f.rbuf)))
-	}
-
-	s = string(f.rbuf[:size])
-	f.rbuf = f.rbuf[size:]
-	return
-}
-
-func (f *framer) readUUID() *UUID {
-	if len(f.rbuf) < 16 {
-		panic(fmt.Errorf("not enough bytes in buffer to read uuid require %d got: %d", 16, len(f.rbuf)))
-	}
-
-	// TODO: how to handle this error? If it is a UUID, then, surely, problems.
-	u, _ := UUIDFromBytes(f.rbuf[:16])
-	f.rbuf = f.rbuf[16:]
-	return &u
-}
-
-func (f *framer) readStringList() []string {
-	size := f.readShort()
-
-	l := make([]string, size)
-	for i := 0; i < int(size); i++ {
-		l[i] = f.readString()
-	}
-
-	return l
-}
-
-func (f *framer) readBytesInternal() ([]byte, error) {
-	size := f.readInt()
-	if size < 0 {
-		return nil, nil
-	}
-
-	if len(f.rbuf) < size {
-		return nil, fmt.Errorf("not enough bytes in buffer to read bytes require %d got: %d", size, len(f.rbuf))
-	}
-
-	l := f.rbuf[:size]
-	f.rbuf = f.rbuf[size:]
-
-	return l, nil
-}
-
-func (f *framer) readBytes() []byte {
-	l, err := f.readBytesInternal()
-	if err != nil {
-		panic(err)
-	}
-
-	return l
-}
-
-func (f *framer) readShortBytes() []byte {
-	size := f.readShort()
-	if len(f.rbuf) < int(size) {
-		panic(fmt.Errorf("not enough bytes in buffer to read short bytes: require %d got %d", size, len(f.rbuf)))
-	}
-
-	l := f.rbuf[:size]
-	f.rbuf = f.rbuf[size:]
-
-	return l
-}
-
-func (f *framer) readInet() (net.IP, int) {
-	if len(f.rbuf) < 1 {
-		panic(fmt.Errorf("not enough bytes in buffer to read inet size require %d got: %d", 1, len(f.rbuf)))
-	}
-
-	size := f.rbuf[0]
-	f.rbuf = f.rbuf[1:]
-
-	if !(size == 4 || size == 16) {
-		panic(fmt.Errorf("invalid IP size: %d", size))
-	}
-
-	if len(f.rbuf) < 1 {
-		panic(fmt.Errorf("not enough bytes in buffer to read inet require %d got: %d", size, len(f.rbuf)))
-	}
-
-	ip := make([]byte, size)
-	copy(ip, f.rbuf[:size])
-	f.rbuf = f.rbuf[size:]
-
-	port := f.readInt()
-	return net.IP(ip), port
-}
-
-func (f *framer) readConsistency() Consistency {
-	return Consistency(f.readShort())
-}
-
-func (f *framer) readStringMap() map[string]string {
-	size := f.readShort()
-	m := make(map[string]string)
-
-	for i := 0; i < int(size); i++ {
-		k := f.readString()
-		v := f.readString()
-		m[k] = v
-	}
-
-	return m
-}
-
-func (f *framer) readBytesMap() map[string][]byte {
-	size := f.readShort()
-	m :=
make(map[string][]byte) - - for i := 0; i < int(size); i++ { - k := f.readString() - v := f.readBytes() - m[k] = v - } - - return m -} - -func (f *framer) readStringMultiMap() map[string][]string { - size := f.readShort() - m := make(map[string][]string) - - for i := 0; i < int(size); i++ { - k := f.readString() - v := f.readStringList() - m[k] = v - } - - return m -} - -func (f *framer) writeByte(b byte) { - f.wbuf = append(f.wbuf, b) -} - -func appendBytes(p []byte, d []byte) []byte { - if d == nil { - return appendInt(p, -1) - } - p = appendInt(p, int32(len(d))) - p = append(p, d...) - return p -} - -func appendShort(p []byte, n uint16) []byte { - return append(p, - byte(n>>8), - byte(n), - ) -} - -func appendInt(p []byte, n int32) []byte { - return append(p, byte(n>>24), - byte(n>>16), - byte(n>>8), - byte(n)) -} - -func appendLong(p []byte, n int64) []byte { - return append(p, - byte(n>>56), - byte(n>>48), - byte(n>>40), - byte(n>>32), - byte(n>>24), - byte(n>>16), - byte(n>>8), - byte(n), - ) -} - -// these are protocol level binary types -func (f *framer) writeInt(n int32) { - f.wbuf = appendInt(f.wbuf, n) -} - -func (f *framer) writeShort(n uint16) { - f.wbuf = appendShort(f.wbuf, n) -} - -func (f *framer) writeLong(n int64) { - f.wbuf = appendLong(f.wbuf, n) -} - -func (f *framer) writeString(s string) { - f.writeShort(uint16(len(s))) - f.wbuf = append(f.wbuf, s...) -} - -func (f *framer) writeLongString(s string) { - f.writeInt(int32(len(s))) - f.wbuf = append(f.wbuf, s...) -} - -func (f *framer) writeUUID(u *UUID) { - f.wbuf = append(f.wbuf, u[:]...) -} - -func (f *framer) writeStringList(l []string) { - f.writeShort(uint16(len(l))) - for _, s := range l { - f.writeString(s) - } -} - -func (f *framer) writeUnset() { - // Protocol version 4 specifies that bind variables do not require having a - // value when executing a statement. Bind variables without a value are - // called 'unset'. The 'unset' bind variable is serialized as the int - // value '-2' without following bytes. - f.writeInt(-2) -} - -func (f *framer) writeBytes(p []byte) { - // TODO: handle null case correctly, - // [bytes] A [int] n, followed by n bytes if n >= 0. If n < 0, - // no byte should follow and the value represented is `null`. - if p == nil { - f.writeInt(-1) - } else { - f.writeInt(int32(len(p))) - f.wbuf = append(f.wbuf, p...) - } -} - -func (f *framer) writeShortBytes(p []byte) { - f.writeShort(uint16(len(p))) - f.wbuf = append(f.wbuf, p...) 
-} - -func (f *framer) writeInet(ip net.IP, port int) { - f.wbuf = append(f.wbuf, - byte(len(ip)), - ) - - f.wbuf = append(f.wbuf, - []byte(ip)..., - ) - - f.writeInt(int32(port)) -} - -func (f *framer) writeConsistency(cons Consistency) { - f.writeShort(uint16(cons)) -} - -func (f *framer) writeStringMap(m map[string]string) { - f.writeShort(uint16(len(m))) - for k, v := range m { - f.writeString(k) - f.writeString(v) - } -} diff --git a/vendor/github.com/gocql/gocql/fuzz.go b/vendor/github.com/gocql/gocql/fuzz.go deleted file mode 100644 index 3606f9381d..0000000000 --- a/vendor/github.com/gocql/gocql/fuzz.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build gofuzz - -package gocql - -import "bytes" - -func Fuzz(data []byte) int { - var bw bytes.Buffer - - r := bytes.NewReader(data) - - head, err := readHeader(r, make([]byte, 9)) - if err != nil { - return 0 - } - - framer := newFramer(r, &bw, nil, byte(head.version)) - err = framer.readFrame(&head) - if err != nil { - return 0 - } - - frame, err := framer.parseFrame() - if err != nil { - return 0 - } - - if frame != nil { - return 1 - } - - return 2 -} diff --git a/vendor/github.com/gocql/gocql/helpers.go b/vendor/github.com/gocql/gocql/helpers.go deleted file mode 100644 index 120897903e..0000000000 --- a/vendor/github.com/gocql/gocql/helpers.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "fmt" - "math/big" - "reflect" - "strings" - "time" - - "gopkg.in/inf.v0" -) - -type RowData struct { - Columns []string - Values []interface{} -} - -func goType(t TypeInfo) reflect.Type { - switch t.Type() { - case TypeVarchar, TypeAscii, TypeInet, TypeText: - return reflect.TypeOf(*new(string)) - case TypeBigInt, TypeCounter: - return reflect.TypeOf(*new(int64)) - case TypeTimestamp: - return reflect.TypeOf(*new(time.Time)) - case TypeBlob: - return reflect.TypeOf(*new([]byte)) - case TypeBoolean: - return reflect.TypeOf(*new(bool)) - case TypeFloat: - return reflect.TypeOf(*new(float32)) - case TypeDouble: - return reflect.TypeOf(*new(float64)) - case TypeInt: - return reflect.TypeOf(*new(int)) - case TypeSmallInt: - return reflect.TypeOf(*new(int16)) - case TypeTinyInt: - return reflect.TypeOf(*new(int8)) - case TypeDecimal: - return reflect.TypeOf(*new(*inf.Dec)) - case TypeUUID, TypeTimeUUID: - return reflect.TypeOf(*new(UUID)) - case TypeList, TypeSet: - return reflect.SliceOf(goType(t.(CollectionType).Elem)) - case TypeMap: - return reflect.MapOf(goType(t.(CollectionType).Key), goType(t.(CollectionType).Elem)) - case TypeVarint: - return reflect.TypeOf(*new(*big.Int)) - case TypeTuple: - // what can we do here? 
all there is to do is to make a list of interface{} - tuple := t.(TupleTypeInfo) - return reflect.TypeOf(make([]interface{}, len(tuple.Elems))) - case TypeUDT: - return reflect.TypeOf(make(map[string]interface{})) - case TypeDate: - return reflect.TypeOf(*new(time.Time)) - default: - return nil - } -} - -func dereference(i interface{}) interface{} { - return reflect.Indirect(reflect.ValueOf(i)).Interface() -} - -func getCassandraBaseType(name string) Type { - switch name { - case "ascii": - return TypeAscii - case "bigint": - return TypeBigInt - case "blob": - return TypeBlob - case "boolean": - return TypeBoolean - case "counter": - return TypeCounter - case "decimal": - return TypeDecimal - case "double": - return TypeDouble - case "float": - return TypeFloat - case "int": - return TypeInt - case "timestamp": - return TypeTimestamp - case "uuid": - return TypeUUID - case "varchar": - return TypeVarchar - case "text": - return TypeText - case "varint": - return TypeVarint - case "timeuuid": - return TypeTimeUUID - case "inet": - return TypeInet - case "MapType": - return TypeMap - case "ListType": - return TypeList - case "SetType": - return TypeSet - case "TupleType": - return TypeTuple - default: - return TypeCustom - } -} - -func getCassandraType(name string) TypeInfo { - if strings.HasPrefix(name, "frozen<") { - return getCassandraType(strings.TrimPrefix(name[:len(name)-1], "frozen<")) - } else if strings.HasPrefix(name, "set<") { - return CollectionType{ - NativeType: NativeType{typ: TypeSet}, - Elem: getCassandraType(strings.TrimPrefix(name[:len(name)-1], "set<")), - } - } else if strings.HasPrefix(name, "list<") { - return CollectionType{ - NativeType: NativeType{typ: TypeList}, - Elem: getCassandraType(strings.TrimPrefix(name[:len(name)-1], "list<")), - } - } else if strings.HasPrefix(name, "map<") { - names := strings.Split(strings.TrimPrefix(name[:len(name)-1], "map<"), ", ") - if len(names) != 2 { - panic(fmt.Sprintf("invalid map type: %v", name)) - } - - return CollectionType{ - NativeType: NativeType{typ: TypeMap}, - Key: getCassandraType(names[0]), - Elem: getCassandraType(names[1]), - } - } else if strings.HasPrefix(name, "tuple<") { - names := strings.Split(strings.TrimPrefix(name[:len(name)-1], "tuple<"), ", ") - types := make([]TypeInfo, len(names)) - - for i, name := range names { - types[i] = getCassandraType(name) - } - - return TupleTypeInfo{ - NativeType: NativeType{typ: TypeTuple}, - Elems: types, - } - } else { - return NativeType{ - typ: getCassandraBaseType(name), - } - } -} - -func getApacheCassandraType(class string) Type { - switch strings.TrimPrefix(class, apacheCassandraTypePrefix) { - case "AsciiType": - return TypeAscii - case "LongType": - return TypeBigInt - case "BytesType": - return TypeBlob - case "BooleanType": - return TypeBoolean - case "CounterColumnType": - return TypeCounter - case "DecimalType": - return TypeDecimal - case "DoubleType": - return TypeDouble - case "FloatType": - return TypeFloat - case "Int32Type": - return TypeInt - case "ShortType": - return TypeSmallInt - case "ByteType": - return TypeTinyInt - case "DateType", "TimestampType": - return TypeTimestamp - case "UUIDType", "LexicalUUIDType": - return TypeUUID - case "UTF8Type": - return TypeVarchar - case "IntegerType": - return TypeVarint - case "TimeUUIDType": - return TypeTimeUUID - case "InetAddressType": - return TypeInet - case "MapType": - return TypeMap - case "ListType": - return TypeList - case "SetType": - return TypeSet - case "TupleType": - return TypeTuple - 
default:
-		return TypeCustom
-	}
-}
-
-func typeCanBeNull(typ TypeInfo) bool {
-	switch typ.(type) {
-	case CollectionType, UDTTypeInfo, TupleTypeInfo:
-		return false
-	}
-
-	return true
-}
-
-func (r *RowData) rowMap(m map[string]interface{}) {
-	for i, column := range r.Columns {
-		val := dereference(r.Values[i])
-		if valVal := reflect.ValueOf(val); valVal.Kind() == reflect.Slice {
-			valCopy := reflect.MakeSlice(valVal.Type(), valVal.Len(), valVal.Cap())
-			reflect.Copy(valCopy, valVal)
-			m[column] = valCopy.Interface()
-		} else {
-			m[column] = val
-		}
-	}
-}
-
-// TupleColumnName will return the column name of a tuple value in a column named
-// c at index n. It should be used if a specific element within a tuple is needed
-// to be extracted from a map returned from SliceMap or MapScan.
-func TupleColumnName(c string, n int) string {
-	return fmt.Sprintf("%s[%d]", c, n)
-}
-
-func (iter *Iter) RowData() (RowData, error) {
-	if iter.err != nil {
-		return RowData{}, iter.err
-	}
-
-	columns := make([]string, 0, len(iter.Columns()))
-	values := make([]interface{}, 0, len(iter.Columns()))
-
-	for _, column := range iter.Columns() {
-		if c, ok := column.TypeInfo.(TupleTypeInfo); !ok {
-			val := column.TypeInfo.New()
-			columns = append(columns, column.Name)
-			values = append(values, val)
-		} else {
-			for i, elem := range c.Elems {
-				columns = append(columns, TupleColumnName(column.Name, i))
-				values = append(values, elem.New())
-			}
-		}
-	}
-
-	rowData := RowData{
-		Columns: columns,
-		Values:  values,
-	}
-
-	return rowData, nil
-}
-
-// TODO(zariel): is it worth exporting this?
-func (iter *Iter) rowMap() (map[string]interface{}, error) {
-	if iter.err != nil {
-		return nil, iter.err
-	}
-
-	rowData, _ := iter.RowData()
-	iter.Scan(rowData.Values...)
-	m := make(map[string]interface{}, len(rowData.Columns))
-	rowData.rowMap(m)
-	return m, nil
-}
-
-// SliceMap is a helper function to make the API easier to use. It returns
-// the data from the query in the form of []map[string]interface{}.
-func (iter *Iter) SliceMap() ([]map[string]interface{}, error) {
-	if iter.err != nil {
-		return nil, iter.err
-	}
-
-	// Not checking for the error because we just did
-	rowData, _ := iter.RowData()
-	dataToReturn := make([]map[string]interface{}, 0)
-	for iter.Scan(rowData.Values...) {
-		m := make(map[string]interface{}, len(rowData.Columns))
-		rowData.rowMap(m)
-		dataToReturn = append(dataToReturn, m)
-	}
-	if iter.err != nil {
-		return nil, iter.err
-	}
-	return dataToReturn, nil
-}
-
-// MapScan takes a map[string]interface{} and populates it with a row
-// that is returned from Cassandra.
-//
-// Each call to MapScan() must be made with a new map object.
-// During the call to MapScan() any pointers in the existing map -// are replaced with non pointer types before the call returns -// -// iter := session.Query(`SELECT * FROM mytable`).Iter() -// for { -// // New map each iteration -// row = make(map[string]interface{}) -// if !iter.MapScan(row) { -// break -// } -// // Do things with row -// if fullname, ok := row["fullname"]; ok { -// fmt.Printf("Full Name: %s\n", fullname) -// } -// } -// -// You can also pass pointers in the map before each call -// -// var fullName FullName // Implements gocql.Unmarshaler and gocql.Marshaler interfaces -// var address net.IP -// var age int -// iter := session.Query(`SELECT * FROM scan_map_table`).Iter() -// for { -// // New map each iteration -// row := map[string]interface{}{ -// "fullname": &fullName, -// "age": &age, -// "address": &address, -// } -// if !iter.MapScan(row) { -// break -// } -// fmt.Printf("First: %s Age: %d Address: %q\n", fullName.FirstName, age, address) -// } -func (iter *Iter) MapScan(m map[string]interface{}) bool { - if iter.err != nil { - return false - } - - // Not checking for the error because we just did - rowData, _ := iter.RowData() - - for i, col := range rowData.Columns { - if dest, ok := m[col]; ok { - rowData.Values[i] = dest - } - } - - if iter.Scan(rowData.Values...) { - rowData.rowMap(m) - return true - } - return false -} - -func copyBytes(p []byte) []byte { - b := make([]byte, len(p)) - copy(b, p) - return b -} diff --git a/vendor/github.com/gocql/gocql/host_source.go b/vendor/github.com/gocql/gocql/host_source.go deleted file mode 100644 index 988324c2e9..0000000000 --- a/vendor/github.com/gocql/gocql/host_source.go +++ /dev/null @@ -1,692 +0,0 @@ -package gocql - -import ( - "errors" - "fmt" - "net" - "strconv" - "strings" - "sync" - "time" -) - -type nodeState int32 - -func (n nodeState) String() string { - if n == NodeUp { - return "UP" - } else if n == NodeDown { - return "DOWN" - } - return fmt.Sprintf("UNKNOWN_%d", n) -} - -const ( - NodeUp nodeState = iota - NodeDown -) - -type cassVersion struct { - Major, Minor, Patch int -} - -func (c *cassVersion) Set(v string) error { - if v == "" { - return nil - } - - return c.UnmarshalCQL(nil, []byte(v)) -} - -func (c *cassVersion) UnmarshalCQL(info TypeInfo, data []byte) error { - return c.unmarshal(data) -} - -func (c *cassVersion) unmarshal(data []byte) error { - version := strings.TrimSuffix(string(data), "-SNAPSHOT") - version = strings.TrimPrefix(version, "v") - v := strings.Split(version, ".") - - if len(v) < 2 { - return fmt.Errorf("invalid version string: %s", data) - } - - var err error - c.Major, err = strconv.Atoi(v[0]) - if err != nil { - return fmt.Errorf("invalid major version %v: %v", v[0], err) - } - - c.Minor, err = strconv.Atoi(v[1]) - if err != nil { - return fmt.Errorf("invalid minor version %v: %v", v[1], err) - } - - if len(v) > 2 { - c.Patch, err = strconv.Atoi(v[2]) - if err != nil { - return fmt.Errorf("invalid patch version %v: %v", v[2], err) - } - } - - return nil -} - -func (c cassVersion) Before(major, minor, patch int) bool { - if c.Major > major { - return true - } else if c.Minor > minor { - return true - } else if c.Patch > patch { - return true - } - return false -} - -func (c cassVersion) String() string { - return fmt.Sprintf("v%d.%d.%d", c.Major, c.Minor, c.Patch) -} - -func (c cassVersion) nodeUpDelay() time.Duration { - if c.Major >= 2 && c.Minor >= 2 { - // CASSANDRA-8236 - return 0 - } - - return 10 * time.Second -} - -type HostInfo struct { - // TODO(zariel): reduce 
locking maybe, not all values will change, but to ensure
-	// that we are thread safe use a mutex to access all fields.
-	mu               sync.RWMutex
-	peer             net.IP
-	broadcastAddress net.IP
-	listenAddress    net.IP
-	rpcAddress       net.IP
-	preferredIP      net.IP
-	connectAddress   net.IP
-	port             int
-	dataCenter       string
-	rack             string
-	hostId           string
-	workload         string
-	graph            bool
-	dseVersion       string
-	partitioner      string
-	clusterName      string
-	version          cassVersion
-	state            nodeState
-	tokens           []string
-}
-
-func (h *HostInfo) Equal(host *HostInfo) bool {
-	if h == host {
-		// prevent rlock reentry
-		return true
-	}
-
-	return h.ConnectAddress().Equal(host.ConnectAddress())
-}
-
-func (h *HostInfo) Peer() net.IP {
-	h.mu.RLock()
-	defer h.mu.RUnlock()
-	return h.peer
-}
-
-func (h *HostInfo) setPeer(peer net.IP) *HostInfo {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-	h.peer = peer
-	return h
-}
-
-func (h *HostInfo) invalidConnectAddr() bool {
-	h.mu.RLock()
-	defer h.mu.RUnlock()
-	addr, _ := h.connectAddressLocked()
-	return !validIpAddr(addr)
-}
-
-func validIpAddr(addr net.IP) bool {
-	return addr != nil && !addr.IsUnspecified()
-}
-
-func (h *HostInfo) connectAddressLocked() (net.IP, string) {
-	if validIpAddr(h.connectAddress) {
-		return h.connectAddress, "connect_address"
-	} else if validIpAddr(h.rpcAddress) {
-		return h.rpcAddress, "rpc_address"
-	} else if validIpAddr(h.preferredIP) {
-		// where does preferred_ip get set?
-		return h.preferredIP, "preferred_ip"
-	} else if validIpAddr(h.broadcastAddress) {
-		return h.broadcastAddress, "broadcast_address"
-	} else if validIpAddr(h.peer) {
-		return h.peer, "peer"
-	}
-	return net.IPv4zero, "invalid"
-}
-
-// ConnectAddress returns the address that should be used to connect to the host.
-// If you wish to override this, use an AddressTranslator or
-// use a HostFilter to SetConnectAddress().
-func (h *HostInfo) ConnectAddress() net.IP {
-	h.mu.RLock()
-	defer h.mu.RUnlock()
-
-	if addr, _ := h.connectAddressLocked(); validIpAddr(addr) {
-		return addr
-	}
-	panic(fmt.Sprintf("no valid connect address for host: %v. Is your cluster configured correctly?", h))
-}
-
-func (h *HostInfo) SetConnectAddress(address net.IP) *HostInfo {
-	// TODO(zariel): should this not be exported?
- h.mu.Lock() - defer h.mu.Unlock() - h.connectAddress = address - return h -} - -func (h *HostInfo) BroadcastAddress() net.IP { - h.mu.RLock() - defer h.mu.RUnlock() - return h.broadcastAddress -} - -func (h *HostInfo) ListenAddress() net.IP { - h.mu.RLock() - defer h.mu.RUnlock() - return h.listenAddress -} - -func (h *HostInfo) RPCAddress() net.IP { - h.mu.RLock() - defer h.mu.RUnlock() - return h.rpcAddress -} - -func (h *HostInfo) PreferredIP() net.IP { - h.mu.RLock() - defer h.mu.RUnlock() - return h.preferredIP -} - -func (h *HostInfo) DataCenter() string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.dataCenter -} - -func (h *HostInfo) setDataCenter(dataCenter string) *HostInfo { - h.mu.Lock() - defer h.mu.Unlock() - h.dataCenter = dataCenter - return h -} - -func (h *HostInfo) Rack() string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.rack -} - -func (h *HostInfo) setRack(rack string) *HostInfo { - h.mu.Lock() - defer h.mu.Unlock() - h.rack = rack - return h -} - -func (h *HostInfo) HostID() string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.hostId -} - -func (h *HostInfo) setHostID(hostID string) *HostInfo { - h.mu.Lock() - defer h.mu.Unlock() - h.hostId = hostID - return h -} - -func (h *HostInfo) WorkLoad() string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.workload -} - -func (h *HostInfo) Graph() bool { - h.mu.RLock() - defer h.mu.RUnlock() - return h.graph -} - -func (h *HostInfo) DSEVersion() string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.dseVersion -} - -func (h *HostInfo) Partitioner() string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.partitioner -} - -func (h *HostInfo) ClusterName() string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.clusterName -} - -func (h *HostInfo) Version() cassVersion { - h.mu.RLock() - defer h.mu.RUnlock() - return h.version -} - -func (h *HostInfo) setVersion(major, minor, patch int) *HostInfo { - h.mu.Lock() - defer h.mu.Unlock() - h.version = cassVersion{major, minor, patch} - return h -} - -func (h *HostInfo) State() nodeState { - h.mu.RLock() - defer h.mu.RUnlock() - return h.state -} - -func (h *HostInfo) setState(state nodeState) *HostInfo { - h.mu.Lock() - defer h.mu.Unlock() - h.state = state - return h -} - -func (h *HostInfo) Tokens() []string { - h.mu.RLock() - defer h.mu.RUnlock() - return h.tokens -} - -func (h *HostInfo) setTokens(tokens []string) *HostInfo { - h.mu.Lock() - defer h.mu.Unlock() - h.tokens = tokens - return h -} - -func (h *HostInfo) Port() int { - h.mu.RLock() - defer h.mu.RUnlock() - return h.port -} - -func (h *HostInfo) setPort(port int) *HostInfo { - h.mu.Lock() - defer h.mu.Unlock() - h.port = port - return h -} - -func (h *HostInfo) update(from *HostInfo) { - if h == from { - return - } - - h.mu.Lock() - defer h.mu.Unlock() - - from.mu.RLock() - defer from.mu.RUnlock() - - // autogenerated do not update - if h.peer == nil { - h.peer = from.peer - } - if h.broadcastAddress == nil { - h.broadcastAddress = from.broadcastAddress - } - if h.listenAddress == nil { - h.listenAddress = from.listenAddress - } - if h.rpcAddress == nil { - h.rpcAddress = from.rpcAddress - } - if h.preferredIP == nil { - h.preferredIP = from.preferredIP - } - if h.connectAddress == nil { - h.connectAddress = from.connectAddress - } - if h.port == 0 { - h.port = from.port - } - if h.dataCenter == "" { - h.dataCenter = from.dataCenter - } - if h.rack == "" { - h.rack = from.rack - } - if h.hostId == "" { - h.hostId = from.hostId - } - if h.workload == "" { - h.workload = from.workload - } - 
if h.dseVersion == "" { - h.dseVersion = from.dseVersion - } - if h.partitioner == "" { - h.partitioner = from.partitioner - } - if h.clusterName == "" { - h.clusterName = from.clusterName - } - if h.version == (cassVersion{}) { - h.version = from.version - } - if h.tokens == nil { - h.tokens = from.tokens - } -} - -func (h *HostInfo) IsUp() bool { - return h != nil && h.State() == NodeUp -} - -func (h *HostInfo) String() string { - h.mu.RLock() - defer h.mu.RUnlock() - - connectAddr, source := h.connectAddressLocked() - return fmt.Sprintf("[HostInfo connectAddress=%q peer=%q rpc_address=%q broadcast_address=%q "+ - "preferred_ip=%q connect_addr=%q connect_addr_source=%q "+ - "port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", - h.connectAddress, h.peer, h.rpcAddress, h.broadcastAddress, h.preferredIP, - connectAddr, source, - h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens)) -} - -// Polls system.peers at a specific interval to find new hosts -type ringDescriber struct { - session *Session - mu sync.Mutex - prevHosts []*HostInfo - prevPartitioner string -} - -// Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces -func checkSystemSchema(control *controlConn) (bool, error) { - iter := control.query("SELECT * FROM system_schema.keyspaces") - if err := iter.err; err != nil { - if errf, ok := err.(*errorFrame); ok { - if errf.code == errSyntax { - return false, nil - } - } - - return false, err - } - - return true, nil -} - -// Given a map that represents a row from either system.local or system.peers -// return as much information as we can in *HostInfo -func (s *Session) hostInfoFromMap(row map[string]interface{}, port int) (*HostInfo, error) { - const assertErrorMsg = "Assertion failed for %s" - var ok bool - - // Default to our connected port if the cluster doesn't have port information - host := HostInfo{ - port: port, - } - - for key, value := range row { - switch key { - case "data_center": - host.dataCenter, ok = value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "data_center") - } - case "rack": - host.rack, ok = value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "rack") - } - case "host_id": - hostId, ok := value.(UUID) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "host_id") - } - host.hostId = hostId.String() - case "release_version": - version, ok := value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "release_version") - } - host.version.Set(version) - case "peer": - ip, ok := value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "peer") - } - host.peer = net.ParseIP(ip) - case "cluster_name": - host.clusterName, ok = value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "cluster_name") - } - case "partitioner": - host.partitioner, ok = value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "partitioner") - } - case "broadcast_address": - ip, ok := value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "broadcast_address") - } - host.broadcastAddress = net.ParseIP(ip) - case "preferred_ip": - ip, ok := value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "preferred_ip") - } - host.preferredIP = net.ParseIP(ip) - case "rpc_address": - ip, ok := value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "rpc_address") - } - host.rpcAddress = net.ParseIP(ip) - case "listen_address": - ip, ok := value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, 
"listen_address") - } - host.listenAddress = net.ParseIP(ip) - case "workload": - host.workload, ok = value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "workload") - } - case "graph": - host.graph, ok = value.(bool) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "graph") - } - case "tokens": - host.tokens, ok = value.([]string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "tokens") - } - case "dse_version": - host.dseVersion, ok = value.(string) - if !ok { - return nil, fmt.Errorf(assertErrorMsg, "dse_version") - } - } - // TODO(thrawn01): Add 'port'? once CASSANDRA-7544 is complete - // Not sure what the port field will be called until the JIRA issue is complete - } - - ip, port := s.cfg.translateAddressPort(host.ConnectAddress(), host.port) - host.connectAddress = ip - host.port = port - - return &host, nil -} - -// Ask the control node for host info on all it's known peers -func (r *ringDescriber) getClusterPeerInfo() ([]*HostInfo, error) { - var hosts []*HostInfo - iter := r.session.control.withConnHost(func(ch *connHost) *Iter { - hosts = append(hosts, ch.host) - return ch.conn.query("SELECT * FROM system.peers") - }) - - if iter == nil { - return nil, errNoControl - } - - rows, err := iter.SliceMap() - if err != nil { - // TODO(zariel): make typed error - return nil, fmt.Errorf("unable to fetch peer host info: %s", err) - } - - for _, row := range rows { - // extract all available info about the peer - host, err := r.session.hostInfoFromMap(row, r.session.cfg.Port) - if err != nil { - return nil, err - } else if !isValidPeer(host) { - // If it's not a valid peer - Logger.Printf("Found invalid peer '%s' "+ - "Likely due to a gossip or snitch issue, this host will be ignored", host) - continue - } - - hosts = append(hosts, host) - } - - return hosts, nil -} - -// Return true if the host is a valid peer -func isValidPeer(host *HostInfo) bool { - return !(len(host.RPCAddress()) == 0 || - host.hostId == "" || - host.dataCenter == "" || - host.rack == "" || - len(host.tokens) == 0) -} - -// Return a list of hosts the cluster knows about -func (r *ringDescriber) GetHosts() ([]*HostInfo, string, error) { - r.mu.Lock() - defer r.mu.Unlock() - - hosts, err := r.getClusterPeerInfo() - if err != nil { - return r.prevHosts, r.prevPartitioner, err - } - - var partitioner string - if len(hosts) > 0 { - partitioner = hosts[0].Partitioner() - } - - return hosts, partitioner, nil -} - -// Given an ip/port return HostInfo for the specified ip/port -func (r *ringDescriber) getHostInfo(ip net.IP, port int) (*HostInfo, error) { - var host *HostInfo - iter := r.session.control.withConnHost(func(ch *connHost) *Iter { - if ch.host.ConnectAddress().Equal(ip) { - host = ch.host - return nil - } - - return ch.conn.query("SELECT * FROM system.peers") - }) - - if iter != nil { - rows, err := iter.SliceMap() - if err != nil { - return nil, err - } - - for _, row := range rows { - h, err := r.session.hostInfoFromMap(row, port) - if err != nil { - return nil, err - } - - if h.ConnectAddress().Equal(ip) { - host = h - break - } - } - - if host == nil { - return nil, errors.New("host not found in peers table") - } - } - - if host == nil { - return nil, errors.New("unable to fetch host info: invalid control connection") - } else if host.invalidConnectAddr() { - return nil, fmt.Errorf("host ConnectAddress invalid ip=%v: %v", ip, host) - } - - return host, nil -} - -func (r *ringDescriber) refreshRing() error { - // if we have 0 hosts this will return the previous list of hosts to - // 
attempt to reconnect to the cluster otherwise we would never find - // downed hosts again, could possibly have an optimisation to only - // try to add new hosts if GetHosts didnt error and the hosts didnt change. - hosts, partitioner, err := r.GetHosts() - if err != nil { - return err - } - - prevHosts := r.session.ring.currentHosts() - - // TODO: move this to session - for _, h := range hosts { - if filter := r.session.cfg.HostFilter; filter != nil && !filter.Accept(h) { - continue - } - - if host, ok := r.session.ring.addHostIfMissing(h); !ok { - r.session.pool.addHost(h) - r.session.policy.AddHost(h) - } else { - host.update(h) - } - delete(prevHosts, h.ConnectAddress().String()) - } - - // TODO(zariel): it may be worth having a mutex covering the overall ring state - // in a session so that everything sees a consistent state. Becuase as is today - // events can come in and due to ordering an UP host could be removed from the cluster - for _, host := range prevHosts { - r.session.removeHost(host) - } - - r.session.metadata.setPartitioner(partitioner) - r.session.policy.SetPartitioner(partitioner) - return nil -} diff --git a/vendor/github.com/gocql/gocql/host_source_gen.go b/vendor/github.com/gocql/gocql/host_source_gen.go deleted file mode 100644 index c82193cbd4..0000000000 --- a/vendor/github.com/gocql/gocql/host_source_gen.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build genhostinfo - -package main - -import ( - "fmt" - "reflect" - "sync" - - "github.com/gocql/gocql" -) - -func gen(clause, field string) { - fmt.Printf("if h.%s == %s {\n", field, clause) - fmt.Printf("\th.%s = from.%s\n", field, field) - fmt.Println("}") -} - -func main() { - t := reflect.ValueOf(&gocql.HostInfo{}).Elem().Type() - mu := reflect.TypeOf(sync.RWMutex{}) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type == mu { - continue - } - - switch f.Type.Kind() { - case reflect.Slice: - gen("nil", f.Name) - case reflect.String: - gen(`""`, f.Name) - case reflect.Int: - gen("0", f.Name) - case reflect.Struct: - gen("("+f.Type.Name()+"{})", f.Name) - case reflect.Bool, reflect.Int32: - continue - default: - panic(fmt.Sprintf("unknown field: %s", f)) - } - } - -} diff --git a/vendor/github.com/gocql/gocql/integration.sh b/vendor/github.com/gocql/gocql/integration.sh deleted file mode 100755 index a6692d2151..0000000000 --- a/vendor/github.com/gocql/gocql/integration.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -set -eux - -function run_tests() { - local clusterSize=3 - local version=$1 - local auth=$2 - - if [ "$auth" = true ]; then - clusterSize=1 - fi - - local keypath="$(pwd)/testdata/pki" - - local conf=( - "client_encryption_options.enabled: true" - "client_encryption_options.keystore: $keypath/.keystore" - "client_encryption_options.keystore_password: cassandra" - "client_encryption_options.require_client_auth: true" - "client_encryption_options.truststore: $keypath/.truststore" - "client_encryption_options.truststore_password: cassandra" - "concurrent_reads: 2" - "concurrent_writes: 2" - "rpc_server_type: sync" - "rpc_min_threads: 2" - "rpc_max_threads: 2" - "write_request_timeout_in_ms: 5000" - "read_request_timeout_in_ms: 5000" - ) - - ccm remove test || true - - ccm create test -v $version -n $clusterSize -d --vnodes --jvm_arg="-Xmx256m -XX:NewSize=100m" - ccm updateconf "${conf[@]}" - - if [ "$auth" = true ] - then - ccm updateconf 'authenticator: PasswordAuthenticator' 'authorizer: CassandraAuthorizer' - rm -rf $HOME/.ccm/test/node1/data/system_auth - fi - - local proto=2 - if [[ 
$version == 1.2.* ]]; then - proto=1 - elif [[ $version == 2.0.* ]]; then - proto=2 - elif [[ $version == 2.1.* ]]; then - proto=3 - elif [[ $version == 2.2.* || $version == 3.0.* ]]; then - proto=4 - ccm updateconf 'enable_user_defined_functions: true' - elif [[ $version == 3.*.* ]]; then - proto=4 - ccm updateconf 'enable_user_defined_functions: true' - fi - - sleep 1s - - ccm list - ccm start --wait-for-binary-proto - ccm status - ccm node1 nodetool status - - local args="-gocql.timeout=60s -runssl -proto=$proto -rf=3 -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy -gocql.cversion=$version -cluster=$(ccm liveset) ./..." - - go test -v -tags unit - - if [ "$auth" = true ] - then - sleep 30s - go test -run=TestAuthentication -tags "integration gocql_debug" -timeout=15s -runauth $args - else - sleep 1s - go test -tags "integration gocql_debug" -timeout=5m $args - - ccm clear - ccm start - sleep 1s - - go test -tags "ccm gocql_debug" -timeout=5m $args - fi - - ccm remove -} - -run_tests $1 $2 diff --git a/vendor/github.com/gocql/gocql/internal/lru/lru.go b/vendor/github.com/gocql/gocql/internal/lru/lru.go deleted file mode 100644 index 14ca1f4332..0000000000 --- a/vendor/github.com/gocql/gocql/internal/lru/lru.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2015 To gocql authors -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. -// -// This cache has been forked from github.com/golang/groupcache/lru, but -// specialized with string keys to avoid the allocations caused by wrapping them -// in interface{}. -type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specifies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key string, value interface{}) - - ll *list.List - cache map[string]*list.Element -} - -type entry struct { - key string - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[string]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key string, value interface{}) { - if c.cache == nil { - c.cache = make(map[string]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. 
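// Typical use of this string-keyed cache (a minimal sketch; as an
// internal package it is only importable from within gocql itself, and
// the cache is not safe for concurrent use without external locking):
//
//	c := lru.New(2)
//	c.Add("a", 1)
//	c.Add("b", 2)
//	c.Add("c", 3) // evicts "a", the least recently used entry
//	if v, ok := c.Get("b"); ok {
//		fmt.Println(v) // 2
//	}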
-func (c *Cache) Get(key string) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key string) bool { - if c.cache == nil { - return false - } - - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - return true - } - - return false -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} diff --git a/vendor/github.com/gocql/gocql/internal/murmur/murmur.go b/vendor/github.com/gocql/gocql/internal/murmur/murmur.go deleted file mode 100644 index d006cc0bf1..0000000000 --- a/vendor/github.com/gocql/gocql/internal/murmur/murmur.go +++ /dev/null @@ -1,135 +0,0 @@ -package murmur - -const ( - c1 int64 = -8663945395140668459 // 0x87c37b91114253d5 - c2 int64 = 5545529020109919103 // 0x4cf5ad432745937f - fmix1 int64 = -49064778989728563 // 0xff51afd7ed558ccd - fmix2 int64 = -4265267296055464877 // 0xc4ceb9fe1a85ec53 -) - -func fmix(n int64) int64 { - // cast to unsigned for logical right bitshift (to match C* MM3 implementation) - n ^= int64(uint64(n) >> 33) - n *= fmix1 - n ^= int64(uint64(n) >> 33) - n *= fmix2 - n ^= int64(uint64(n) >> 33) - - return n -} - -func block(p byte) int64 { - return int64(int8(p)) -} - -func rotl(x int64, r uint8) int64 { - // cast to unsigned for logical right bitshift (to match C* MM3 implementation) - return (x << r) | (int64)((uint64(x) >> (64 - r))) -} - -func Murmur3H1(data []byte) int64 { - length := len(data) - - var h1, h2, k1, k2 int64 - - // body - nBlocks := length / 16 - for i := 0; i < nBlocks; i++ { - k1, k2 = getBlock(data, i) - - k1 *= c1 - k1 = rotl(k1, 31) - k1 *= c2 - h1 ^= k1 - - h1 = rotl(h1, 27) - h1 += h2 - h1 = h1*5 + 0x52dce729 - - k2 *= c2 - k2 = rotl(k2, 33) - k2 *= c1 - h2 ^= k2 - - h2 = rotl(h2, 31) - h2 += h1 - h2 = h2*5 + 0x38495ab5 - } - - // tail - tail := data[nBlocks*16:] - k1 = 0 - k2 = 0 - switch length & 15 { - case 15: - k2 ^= block(tail[14]) << 48 - fallthrough - case 14: - k2 ^= block(tail[13]) << 40 - fallthrough - case 13: - k2 ^= block(tail[12]) << 32 - fallthrough - case 12: - k2 ^= block(tail[11]) << 24 - fallthrough - case 11: - k2 ^= block(tail[10]) << 16 - fallthrough - case 10: - k2 ^= block(tail[9]) << 8 - fallthrough - case 9: - k2 ^= block(tail[8]) - - k2 *= c2 - k2 = rotl(k2, 33) - k2 *= c1 - h2 ^= k2 - - fallthrough - case 8: - k1 ^= block(tail[7]) << 56 - fallthrough - case 7: - k1 ^= block(tail[6]) << 48 - fallthrough - case 6: - k1 ^= block(tail[5]) << 40 - fallthrough - case 5: - k1 ^= block(tail[4]) << 32 - fallthrough - case 4: - k1 ^= block(tail[3]) << 24 - fallthrough - case 3: - k1 ^= block(tail[2]) << 16 - fallthrough - case 2: - k1 ^= block(tail[1]) << 8 - fallthrough - case 1: - k1 ^= block(tail[0]) - - k1 *= c1 - k1 = rotl(k1, 31) - k1 *= c2 - h1 ^= k1 - } - - h1 ^= int64(length) - h2 ^= int64(length) - - h1 += h2 - h2 += h1 - - h1 = fmix(h1) - h2 = fmix(h2) - - h1 += h2 - // the following is extraneous since h2 is discarded - 
// h2 += h1 - - return h1 -} diff --git a/vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go b/vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go deleted file mode 100644 index fd9ab5c14c..0000000000 --- a/vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package murmur - -import "encoding/binary" - -func getBlock(data []byte, n int) (int64, int64) { - k1 := binary.LittleEndian.Int64(data[n*16:]) - k2 := binary.LittleEndian.Int64(data[(n*16)+8:]) - return k1, k2 -} diff --git a/vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go b/vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go deleted file mode 100644 index 501537c77e..0000000000 --- a/vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !appengine - -package murmur - -import ( - "unsafe" -) - -func getBlock(data []byte, n int) (int64, int64) { - block := (*[2]int64)(unsafe.Pointer(&data[n*16])) - - k1 := block[0] - k2 := block[1] - return k1, k2 -} diff --git a/vendor/github.com/gocql/gocql/internal/streams/streams.go b/vendor/github.com/gocql/gocql/internal/streams/streams.go deleted file mode 100644 index ae1ea97903..0000000000 --- a/vendor/github.com/gocql/gocql/internal/streams/streams.go +++ /dev/null @@ -1,140 +0,0 @@ -package streams - -import ( - "math" - "strconv" - "sync/atomic" -) - -const bucketBits = 64 - -// IDGenerator tracks and allocates streams which are in use. -type IDGenerator struct { - NumStreams int - inuseStreams int32 - numBuckets uint32 - - // streams is a bitset where each bit represents a stream, a 1 implies in use - streams []uint64 - offset uint32 -} - -func New(protocol int) *IDGenerator { - maxStreams := 128 - if protocol > 2 { - maxStreams = 32768 - } - - buckets := maxStreams / 64 - // reserve stream 0 - streams := make([]uint64, buckets) - streams[0] = 1 << 63 - - return &IDGenerator{ - NumStreams: maxStreams, - streams: streams, - numBuckets: uint32(buckets), - offset: uint32(buckets) - 1, - } -} - -func streamFromBucket(bucket, streamInBucket int) int { - return (bucket * bucketBits) + streamInBucket -} - -func (s *IDGenerator) GetStream() (int, bool) { - // based closely on the java-driver stream ID generator - // avoid false sharing subsequent requests. 
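// Editor's note: streams are tracked in a []uint64 bitset, one bit per
// stream, most significant bit first within each 64-bit bucket (see
// streamOffset below). For example, stream 0 lives in bucket 0 at mask
// 1<<63, which is why New reserves it with streams[0] = 1 << 63; stream
// 65 lives in bucket 1 (65/64) at bit offset 64 - 65%64 - 1 = 62, i.e.
// mask 1<<62. The CAS loop below rotates the starting bucket between
// callers so concurrent requests tend to claim bits in different buckets.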
- offset := atomic.LoadUint32(&s.offset) - for !atomic.CompareAndSwapUint32(&s.offset, offset, (offset+1)%s.numBuckets) { - offset = atomic.LoadUint32(&s.offset) - } - offset = (offset + 1) % s.numBuckets - - for i := uint32(0); i < s.numBuckets; i++ { - pos := int((i + offset) % s.numBuckets) - - bucket := atomic.LoadUint64(&s.streams[pos]) - if bucket == math.MaxUint64 { - // all streams in use - continue - } - - for j := 0; j < bucketBits; j++ { - mask := uint64(1 << streamOffset(j)) - for bucket&mask == 0 { - if atomic.CompareAndSwapUint64(&s.streams[pos], bucket, bucket|mask) { - atomic.AddInt32(&s.inuseStreams, 1) - return streamFromBucket(int(pos), j), true - } - bucket = atomic.LoadUint64(&s.streams[pos]) - } - } - } - - return 0, false -} - -func bitfmt(b uint64) string { - return strconv.FormatUint(b, 16) -} - -// returns the bucket offset of a given stream -func bucketOffset(i int) int { - return i / bucketBits -} - -func streamOffset(stream int) uint64 { - return bucketBits - uint64(stream%bucketBits) - 1 -} - -func isSet(bits uint64, stream int) bool { - return bits>>streamOffset(stream)&1 == 1 -} - -func (s *IDGenerator) isSet(stream int) bool { - bits := atomic.LoadUint64(&s.streams[bucketOffset(stream)]) - return isSet(bits, stream) -} - -func (s *IDGenerator) String() string { - size := s.numBuckets * (bucketBits + 1) - buf := make([]byte, 0, size) - for i := 0; i < int(s.numBuckets); i++ { - bits := atomic.LoadUint64(&s.streams[i]) - buf = append(buf, bitfmt(bits)...) - buf = append(buf, ' ') - } - return string(buf[:size-1 : size-1]) -} - -func (s *IDGenerator) Clear(stream int) (inuse bool) { - offset := bucketOffset(stream) - bucket := atomic.LoadUint64(&s.streams[offset]) - - mask := uint64(1) << streamOffset(stream) - if bucket&mask != mask { - // already cleared - return false - } - - for !atomic.CompareAndSwapUint64(&s.streams[offset], bucket, bucket & ^mask) { - bucket = atomic.LoadUint64(&s.streams[offset]) - if bucket&mask != mask { - // already cleared - return false - } - } - - // TODO: make this account for 0 stream being reserved - if atomic.AddInt32(&s.inuseStreams, -1) < 0 { - // TODO(zariel): remove this - panic("negative streams inuse") - } - - return true -} - -func (s *IDGenerator) Available() int { - return s.NumStreams - int(atomic.LoadInt32(&s.inuseStreams)) - 1 -} diff --git a/vendor/github.com/gocql/gocql/logger.go b/vendor/github.com/gocql/gocql/logger.go deleted file mode 100644 index bd16d4134b..0000000000 --- a/vendor/github.com/gocql/gocql/logger.go +++ /dev/null @@ -1,30 +0,0 @@ -package gocql - -import ( - "bytes" - "fmt" - "log" -) - -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) - Println(v ...interface{}) -} - -type testLogger struct { - capture bytes.Buffer -} - -func (l *testLogger) Print(v ...interface{}) { fmt.Fprint(&l.capture, v...) } -func (l *testLogger) Printf(format string, v ...interface{}) { fmt.Fprintf(&l.capture, format, v...) } -func (l *testLogger) Println(v ...interface{}) { fmt.Fprintln(&l.capture, v...) } -func (l *testLogger) String() string { return l.capture.String() } - -type defaultLogger struct{} - -func (l *defaultLogger) Print(v ...interface{}) { log.Print(v...) } -func (l *defaultLogger) Printf(format string, v ...interface{}) { log.Printf(format, v...) } -func (l *defaultLogger) Println(v ...interface{}) { log.Println(v...) 
} - -var Logger StdLogger = &defaultLogger{} diff --git a/vendor/github.com/gocql/gocql/marshal.go b/vendor/github.com/gocql/gocql/marshal.go deleted file mode 100644 index fbc41aee66..0000000000 --- a/vendor/github.com/gocql/gocql/marshal.go +++ /dev/null @@ -1,2216 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "math/big" - "net" - "reflect" - "strconv" - "strings" - "time" - - "gopkg.in/inf.v0" -) - -var ( - bigOne = big.NewInt(1) - emptyValue reflect.Value -) - -var ( - ErrorUDTUnavailable = errors.New("UDT are not available on protocols less than 3, please update config") -) - -// Marshaler is the interface implemented by objects that can marshal -// themselves into values understood by Cassandra. -type Marshaler interface { - MarshalCQL(info TypeInfo) ([]byte, error) -} - -// Unmarshaler is the interface implemented by objects that can unmarshal -// a Cassandra specific description of themselves. -type Unmarshaler interface { - UnmarshalCQL(info TypeInfo, data []byte) error -} - -// Marshal returns the CQL encoding of the value for the Cassandra -// internal type described by the info parameter. -func Marshal(info TypeInfo, value interface{}) ([]byte, error) { - if info.Version() < protoVersion1 { - panic("protocol version not set") - } - - if valueRef := reflect.ValueOf(value); valueRef.Kind() == reflect.Ptr { - if valueRef.IsNil() { - return nil, nil - } else if v, ok := value.(Marshaler); ok { - return v.MarshalCQL(info) - } else { - return Marshal(info, valueRef.Elem().Interface()) - } - } - - if v, ok := value.(Marshaler); ok { - return v.MarshalCQL(info) - } - - switch info.Type() { - case TypeVarchar, TypeAscii, TypeBlob, TypeText: - return marshalVarchar(info, value) - case TypeBoolean: - return marshalBool(info, value) - case TypeTinyInt: - return marshalTinyInt(info, value) - case TypeSmallInt: - return marshalSmallInt(info, value) - case TypeInt: - return marshalInt(info, value) - case TypeBigInt, TypeCounter: - return marshalBigInt(info, value) - case TypeFloat: - return marshalFloat(info, value) - case TypeDouble: - return marshalDouble(info, value) - case TypeDecimal: - return marshalDecimal(info, value) - case TypeTimestamp, TypeTime: - return marshalTimestamp(info, value) - case TypeList, TypeSet: - return marshalList(info, value) - case TypeMap: - return marshalMap(info, value) - case TypeUUID, TypeTimeUUID: - return marshalUUID(info, value) - case TypeVarint: - return marshalVarint(info, value) - case TypeInet: - return marshalInet(info, value) - case TypeTuple: - return marshalTuple(info, value) - case TypeUDT: - return marshalUDT(info, value) - case TypeDate: - return marshalDate(info, value) - } - - // detect protocol 2 UDT - if strings.HasPrefix(info.Custom(), "org.apache.cassandra.db.marshal.UserType") && info.Version() < 3 { - return nil, ErrorUDTUnavailable - } - - // TODO(tux21b): add the remaining types - return nil, fmt.Errorf("can not marshal %T into %s", value, info) -} - -// Unmarshal parses the CQL encoded data based on the info parameter that -// describes the Cassandra internal data type and stores the result in the -// value pointed by value. 
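// A minimal round trip through these two entry points (editor's sketch
// only: NativeType's fields are unexported, so outside this package the
// TypeInfo in practice comes from a query's column metadata):
//
//	info := NativeType{proto: 4, typ: TypeInt}
//	b, _ := Marshal(info, 42) // 4-byte big-endian: []byte{0, 0, 0, 42}
//	var out int
//	_ = Unmarshal(info, b, &out) // out == 42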
-func Unmarshal(info TypeInfo, data []byte, value interface{}) error { - if v, ok := value.(Unmarshaler); ok { - return v.UnmarshalCQL(info, data) - } - - if isNullableValue(value) { - return unmarshalNullable(info, data, value) - } - - switch info.Type() { - case TypeVarchar, TypeAscii, TypeBlob, TypeText: - return unmarshalVarchar(info, data, value) - case TypeBoolean: - return unmarshalBool(info, data, value) - case TypeInt: - return unmarshalInt(info, data, value) - case TypeBigInt, TypeCounter: - return unmarshalBigInt(info, data, value) - case TypeVarint: - return unmarshalVarint(info, data, value) - case TypeSmallInt: - return unmarshalSmallInt(info, data, value) - case TypeTinyInt: - return unmarshalTinyInt(info, data, value) - case TypeFloat: - return unmarshalFloat(info, data, value) - case TypeDouble: - return unmarshalDouble(info, data, value) - case TypeDecimal: - return unmarshalDecimal(info, data, value) - case TypeTimestamp, TypeTime: - return unmarshalTimestamp(info, data, value) - case TypeList, TypeSet: - return unmarshalList(info, data, value) - case TypeMap: - return unmarshalMap(info, data, value) - case TypeTimeUUID: - return unmarshalTimeUUID(info, data, value) - case TypeUUID: - return unmarshalUUID(info, data, value) - case TypeInet: - return unmarshalInet(info, data, value) - case TypeTuple: - return unmarshalTuple(info, data, value) - case TypeUDT: - return unmarshalUDT(info, data, value) - case TypeDate: - return unmarshalDate(info, data, value) - } - - // detect protocol 2 UDT - if strings.HasPrefix(info.Custom(), "org.apache.cassandra.db.marshal.UserType") && info.Version() < 3 { - return ErrorUDTUnavailable - } - - // TODO(tux21b): add the remaining types - return fmt.Errorf("can not unmarshal %s into %T", info, value) -} - -func isNullableValue(value interface{}) bool { - v := reflect.ValueOf(value) - return v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Ptr -} - -func isNullData(info TypeInfo, data []byte) bool { - return data == nil -} - -func unmarshalNullable(info TypeInfo, data []byte, value interface{}) error { - valueRef := reflect.ValueOf(value) - - if isNullData(info, data) { - nilValue := reflect.Zero(valueRef.Type().Elem()) - valueRef.Elem().Set(nilValue) - return nil - } - - newValue := reflect.New(valueRef.Type().Elem().Elem()) - valueRef.Elem().Set(newValue) - return Unmarshal(info, data, newValue.Interface()) -} - -func marshalVarchar(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case string: - return []byte(v), nil - case []byte: - return v, nil - } - - if value == nil { - return nil, nil - } - - rv := reflect.ValueOf(value) - t := rv.Type() - k := t.Kind() - switch { - case k == reflect.String: - return []byte(rv.String()), nil - case k == reflect.Slice && t.Elem().Kind() == reflect.Uint8: - return rv.Bytes(), nil - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func unmarshalVarchar(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *string: - *v = string(data) - return nil - case *[]byte: - if data != nil { - *v = copyBytes(data) - } else { - *v = nil - } - return nil - } - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - t := rv.Type() - k := t.Kind() - switch { - case k == 
reflect.String: - rv.SetString(string(data)) - return nil - case k == reflect.Slice && t.Elem().Kind() == reflect.Uint8: - var dataCopy []byte - if data != nil { - dataCopy = make([]byte, len(data)) - copy(dataCopy, data) - } - rv.SetBytes(dataCopy) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func marshalSmallInt(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case int16: - return encShort(v), nil - case uint16: - return encShort(int16(v)), nil - case int8: - return encShort(int16(v)), nil - case uint8: - return encShort(int16(v)), nil - case int: - if v > math.MaxInt16 || v < math.MinInt16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - case int32: - if v > math.MaxInt16 || v < math.MinInt16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - case int64: - if v > math.MaxInt16 || v < math.MinInt16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - case uint: - if v > math.MaxUint16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - case uint32: - if v > math.MaxUint16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - case uint64: - if v > math.MaxUint16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - case string: - n, err := strconv.ParseInt(v, 10, 16) - if err != nil { - return nil, marshalErrorf("can not marshal %T into %s: %v", value, info, err) - } - return encShort(int16(n)), nil - } - - if value == nil { - return nil, nil - } - - switch rv := reflect.ValueOf(value); rv.Type().Kind() { - case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: - v := rv.Int() - if v > math.MaxInt16 || v < math.MinInt16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: - v := rv.Uint() - if v > math.MaxUint16 { - return nil, marshalErrorf("marshal smallint: value %d out of range", v) - } - return encShort(int16(v)), nil - default: - if rv.IsNil() { - return nil, nil - } - } - - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func marshalTinyInt(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case int8: - return []byte{byte(v)}, nil - case uint8: - return []byte{byte(v)}, nil - case int16: - if v > math.MaxInt8 || v < math.MinInt8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case uint16: - if v > math.MaxUint8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case int: - if v > math.MaxInt8 || v < math.MinInt8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case int32: - if v > math.MaxInt8 || v < math.MinInt8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case int64: - if v > math.MaxInt8 || v < 
math.MinInt8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case uint: - if v > math.MaxUint8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case uint32: - if v > math.MaxUint8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case uint64: - if v > math.MaxUint8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case string: - n, err := strconv.ParseInt(v, 10, 8) - if err != nil { - return nil, marshalErrorf("can not marshal %T into %s: %v", value, info, err) - } - return []byte{byte(n)}, nil - } - - if value == nil { - return nil, nil - } - - switch rv := reflect.ValueOf(value); rv.Type().Kind() { - case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: - v := rv.Int() - if v > math.MaxInt8 || v < math.MinInt8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: - v := rv.Uint() - if v > math.MaxUint8 { - return nil, marshalErrorf("marshal tinyint: value %d out of range", v) - } - return []byte{byte(v)}, nil - default: - if rv.IsNil() { - return nil, nil - } - } - - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func marshalInt(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case int: - if v > math.MaxInt32 || v < math.MinInt32 { - return nil, marshalErrorf("marshal int: value %d out of range", v) - } - return encInt(int32(v)), nil - case uint: - if v > math.MaxUint32 { - return nil, marshalErrorf("marshal int: value %d out of range", v) - } - return encInt(int32(v)), nil - case int64: - if v > math.MaxInt32 || v < math.MinInt32 { - return nil, marshalErrorf("marshal int: value %d out of range", v) - } - return encInt(int32(v)), nil - case uint64: - if v > math.MaxUint32 { - return nil, marshalErrorf("marshal int: value %d out of range", v) - } - return encInt(int32(v)), nil - case int32: - return encInt(v), nil - case uint32: - return encInt(int32(v)), nil - case int16: - return encInt(int32(v)), nil - case uint16: - return encInt(int32(v)), nil - case int8: - return encInt(int32(v)), nil - case uint8: - return encInt(int32(v)), nil - case string: - i, err := strconv.ParseInt(v, 10, 32) - if err != nil { - return nil, marshalErrorf("can not marshal string to int: %s", err) - } - return encInt(int32(i)), nil - } - - if value == nil { - return nil, nil - } - - switch rv := reflect.ValueOf(value); rv.Type().Kind() { - case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: - v := rv.Int() - if v > math.MaxInt32 || v < math.MinInt32 { - return nil, marshalErrorf("marshal int: value %d out of range", v) - } - return encInt(int32(v)), nil - case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: - v := rv.Uint() - if v > math.MaxInt32 { - return nil, marshalErrorf("marshal int: value %d out of range", v) - } - return encInt(int32(v)), nil - default: - if rv.IsNil() { - return nil, nil - } - } - - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func encInt(x int32) []byte { - return []byte{byte(x >> 24), byte(x >> 16), byte(x >> 8), byte(x)} -} - -func decInt(x []byte) int32 
{ - if len(x) != 4 { - return 0 - } - return int32(x[0])<<24 | int32(x[1])<<16 | int32(x[2])<<8 | int32(x[3]) -} - -func encShort(x int16) []byte { - p := make([]byte, 2) - p[0] = byte(x >> 8) - p[1] = byte(x) - return p -} - -func decShort(p []byte) int16 { - if len(p) != 2 { - return 0 - } - return int16(p[0])<<8 | int16(p[1]) -} - -func decTiny(p []byte) int8 { - if len(p) != 1 { - return 0 - } - return int8(p[0]) -} - -func marshalBigInt(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case int: - return encBigInt(int64(v)), nil - case uint: - if uint64(v) > math.MaxInt64 { - return nil, marshalErrorf("marshal bigint: value %d out of range", v) - } - return encBigInt(int64(v)), nil - case int64: - return encBigInt(v), nil - case uint64: - return encBigInt(int64(v)), nil - case int32: - return encBigInt(int64(v)), nil - case uint32: - return encBigInt(int64(v)), nil - case int16: - return encBigInt(int64(v)), nil - case uint16: - return encBigInt(int64(v)), nil - case int8: - return encBigInt(int64(v)), nil - case uint8: - return encBigInt(int64(v)), nil - case big.Int: - return encBigInt2C(&v), nil - case string: - i, err := strconv.ParseInt(value.(string), 10, 64) - if err != nil { - return nil, marshalErrorf("can not marshal string to bigint: %s", err) - } - return encBigInt(i), nil - } - - if value == nil { - return nil, nil - } - - rv := reflect.ValueOf(value) - switch rv.Type().Kind() { - case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: - v := rv.Int() - return encBigInt(v), nil - case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: - v := rv.Uint() - if v > math.MaxInt64 { - return nil, marshalErrorf("marshal bigint: value %d out of range", v) - } - return encBigInt(int64(v)), nil - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func encBigInt(x int64) []byte { - return []byte{byte(x >> 56), byte(x >> 48), byte(x >> 40), byte(x >> 32), - byte(x >> 24), byte(x >> 16), byte(x >> 8), byte(x)} -} - -func bytesToInt64(data []byte) (ret int64) { - for i := range data { - ret |= int64(data[i]) << (8 * uint(len(data)-i-1)) - } - return ret -} - -func bytesToUint64(data []byte) (ret uint64) { - for i := range data { - ret |= uint64(data[i]) << (8 * uint(len(data)-i-1)) - } - return ret -} - -func unmarshalBigInt(info TypeInfo, data []byte, value interface{}) error { - return unmarshalIntlike(info, decBigInt(data), data, value) -} - -func unmarshalInt(info TypeInfo, data []byte, value interface{}) error { - return unmarshalIntlike(info, int64(decInt(data)), data, value) -} - -func unmarshalSmallInt(info TypeInfo, data []byte, value interface{}) error { - return unmarshalIntlike(info, int64(decShort(data)), data, value) -} - -func unmarshalTinyInt(info TypeInfo, data []byte, value interface{}) error { - return unmarshalIntlike(info, int64(decTiny(data)), data, value) -} - -func unmarshalVarint(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case *big.Int: - return unmarshalIntlike(info, 0, data, value) - case *uint64: - if len(data) == 9 && data[0] == 0 { - *v = bytesToUint64(data[1:]) - return nil - } - } - - if len(data) > 8 { - return unmarshalErrorf("unmarshal int: varint value %v out of range for %T (use big.Int)", data, value) - } - - int64Val := bytesToInt64(data) - if len(data) > 0 && len(data) < 8 && data[0]&0x80 > 0 { - int64Val -= (1 << 
uint(len(data)*8)) - } - return unmarshalIntlike(info, int64Val, data, value) -} - -func marshalVarint(info TypeInfo, value interface{}) ([]byte, error) { - var ( - retBytes []byte - err error - ) - - switch v := value.(type) { - case unsetColumn: - return nil, nil - case uint64: - if v > uint64(math.MaxInt64) { - retBytes = make([]byte, 9) - binary.BigEndian.PutUint64(retBytes[1:], v) - } else { - retBytes = make([]byte, 8) - binary.BigEndian.PutUint64(retBytes, v) - } - default: - retBytes, err = marshalBigInt(info, value) - } - - if err == nil { - // trim down to most significant byte - i := 0 - for ; i < len(retBytes)-1; i++ { - b0 := retBytes[i] - if b0 != 0 && b0 != 0xFF { - break - } - - b1 := retBytes[i+1] - if b0 == 0 && b1 != 0 { - if b1&0x80 == 0 { - i++ - } - break - } - - if b0 == 0xFF && b1 != 0xFF { - if b1&0x80 > 0 { - i++ - } - break - } - } - retBytes = retBytes[i:] - } - - return retBytes, err -} - -func unmarshalIntlike(info TypeInfo, int64Val int64, data []byte, value interface{}) error { - switch v := value.(type) { - case *int: - if ^uint(0) == math.MaxUint32 && (int64Val < math.MinInt32 || int64Val > math.MaxInt32) { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) - } - *v = int(int64Val) - return nil - case *uint: - unitVal := uint64(int64Val) - if ^uint(0) == math.MaxUint32 && unitVal > math.MaxUint32 { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", unitVal, *v) - } - switch info.Type() { - case TypeInt: - *v = uint(unitVal) & 0xFFFFFFFF - case TypeSmallInt: - *v = uint(unitVal) & 0xFFFF - case TypeTinyInt: - *v = uint(unitVal) & 0xFF - default: - *v = uint(unitVal) - } - return nil - case *int64: - *v = int64Val - return nil - case *uint64: - switch info.Type() { - case TypeInt: - *v = uint64(int64Val) & 0xFFFFFFFF - case TypeSmallInt: - *v = uint64(int64Val) & 0xFFFF - case TypeTinyInt: - *v = uint64(int64Val) & 0xFF - default: - *v = uint64(int64Val) - } - return nil - case *int32: - if int64Val < math.MinInt32 || int64Val > math.MaxInt32 { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) - } - *v = int32(int64Val) - return nil - case *uint32: - if int64Val > math.MaxUint32 { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) - } - switch info.Type() { - case TypeSmallInt: - *v = uint32(int64Val) & 0xFFFF - case TypeTinyInt: - *v = uint32(int64Val) & 0xFF - default: - *v = uint32(int64Val) & 0xFFFFFFFF - } - return nil - case *int16: - if int64Val < math.MinInt16 || int64Val > math.MaxInt16 { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) - } - *v = int16(int64Val) - return nil - case *uint16: - if int64Val > math.MaxUint16 { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) - } - switch info.Type() { - case TypeTinyInt: - *v = uint16(int64Val) & 0xFF - default: - *v = uint16(int64Val) & 0xFFFF - } - return nil - case *int8: - if int64Val < math.MinInt8 || int64Val > math.MaxInt8 { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) - } - *v = int8(int64Val) - return nil - case *uint8: - if int64Val > math.MaxUint8 { - return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) - } - *v = uint8(int64Val) & 0xFF - return nil - case *big.Int: - decBigInt2C(data, v) - return nil - case *string: - *v = strconv.FormatInt(int64Val, 10) - return nil - } - - rv := reflect.ValueOf(value) - if rv.Kind() != 
reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - - switch rv.Type().Kind() { - case reflect.Int: - if ^uint(0) == math.MaxUint32 && (int64Val < math.MinInt32 || int64Val > math.MaxInt32) { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetInt(int64Val) - return nil - case reflect.Int64: - rv.SetInt(int64Val) - return nil - case reflect.Int32: - if int64Val < math.MinInt32 || int64Val > math.MaxInt32 { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetInt(int64Val) - return nil - case reflect.Int16: - if int64Val < math.MinInt16 || int64Val > math.MaxInt16 { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetInt(int64Val) - return nil - case reflect.Int8: - if int64Val < math.MinInt8 || int64Val > math.MaxInt8 { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetInt(int64Val) - return nil - case reflect.Uint: - if int64Val < 0 || (^uint(0) == math.MaxUint32 && int64Val > math.MaxUint32) { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetUint(uint64(int64Val)) - return nil - case reflect.Uint64: - if int64Val < 0 { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetUint(uint64(int64Val)) - return nil - case reflect.Uint32: - if int64Val < 0 || int64Val > math.MaxUint32 { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetUint(uint64(int64Val)) - return nil - case reflect.Uint16: - if int64Val < 0 || int64Val > math.MaxUint16 { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetUint(uint64(int64Val)) - return nil - case reflect.Uint8: - if int64Val < 0 || int64Val > math.MaxUint8 { - return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) - } - rv.SetUint(uint64(int64Val)) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func decBigInt(data []byte) int64 { - if len(data) != 8 { - return 0 - } - return int64(data[0])<<56 | int64(data[1])<<48 | - int64(data[2])<<40 | int64(data[3])<<32 | - int64(data[4])<<24 | int64(data[5])<<16 | - int64(data[6])<<8 | int64(data[7]) -} - -func marshalBool(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case bool: - return encBool(v), nil - } - - if value == nil { - return nil, nil - } - - rv := reflect.ValueOf(value) - switch rv.Type().Kind() { - case reflect.Bool: - return encBool(rv.Bool()), nil - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func encBool(v bool) []byte { - if v { - return []byte{1} - } - return []byte{0} -} - -func unmarshalBool(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *bool: - *v = decBool(data) - return nil - } - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - switch rv.Type().Kind() { - case reflect.Bool: - rv.SetBool(decBool(data)) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func decBool(v []byte) bool { - if len(v) == 0 { - return false - } - return v[0] != 0 -} - -func marshalFloat(info TypeInfo, value interface{}) 
([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case float32: - return encInt(int32(math.Float32bits(v))), nil - } - - if value == nil { - return nil, nil - } - - rv := reflect.ValueOf(value) - switch rv.Type().Kind() { - case reflect.Float32: - return encInt(int32(math.Float32bits(float32(rv.Float())))), nil - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func unmarshalFloat(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *float32: - *v = math.Float32frombits(uint32(decInt(data))) - return nil - } - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - switch rv.Type().Kind() { - case reflect.Float32: - rv.SetFloat(float64(math.Float32frombits(uint32(decInt(data))))) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func marshalDouble(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case float64: - return encBigInt(int64(math.Float64bits(v))), nil - } - if value == nil { - return nil, nil - } - rv := reflect.ValueOf(value) - switch rv.Type().Kind() { - case reflect.Float64: - return encBigInt(int64(math.Float64bits(rv.Float()))), nil - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func unmarshalDouble(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *float64: - *v = math.Float64frombits(uint64(decBigInt(data))) - return nil - } - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - switch rv.Type().Kind() { - case reflect.Float64: - rv.SetFloat(math.Float64frombits(uint64(decBigInt(data)))) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func marshalDecimal(info TypeInfo, value interface{}) ([]byte, error) { - if value == nil { - return nil, nil - } - - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case inf.Dec: - unscaled := encBigInt2C(v.UnscaledBig()) - if unscaled == nil { - return nil, marshalErrorf("can not marshal %T into %s", value, info) - } - - buf := make([]byte, 4+len(unscaled)) - copy(buf[0:4], encInt(int32(v.Scale()))) - copy(buf[4:], unscaled) - return buf, nil - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func unmarshalDecimal(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *inf.Dec: - scale := decInt(data[0:4]) - unscaled := decBigInt2C(data[4:], nil) - *v = *inf.NewDecBig(unscaled, inf.Scale(scale)) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -// decBigInt2C sets the value of n to the big-endian two's complement -// value stored in the given data. If data[0]&80 != 0, the number -// is negative. If data is empty, the result will be 0. 
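// Worked example (editor's sketch): for data = []byte{0xFF}, SetBytes
// yields 255 and the sign bit is set, so 1<<8 = 256 is subtracted,
// giving -1; for data = []byte{0x00, 0xFF} the top bit is clear and the
// result stays 255. encBigInt2C below is the inverse, prepending a 0x00
// byte when a positive value would otherwise read as negative.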
-func decBigInt2C(data []byte, n *big.Int) *big.Int { - if n == nil { - n = new(big.Int) - } - n.SetBytes(data) - if len(data) > 0 && data[0]&0x80 > 0 { - n.Sub(n, new(big.Int).Lsh(bigOne, uint(len(data))*8)) - } - return n -} - -// encBigInt2C returns the big-endian two's complement -// form of n. -func encBigInt2C(n *big.Int) []byte { - switch n.Sign() { - case 0: - return []byte{0} - case 1: - b := n.Bytes() - if b[0]&0x80 > 0 { - b = append([]byte{0}, b...) - } - return b - case -1: - length := uint(n.BitLen()/8+1) * 8 - b := new(big.Int).Add(n, new(big.Int).Lsh(bigOne, length)).Bytes() - // When the most significant bit is on a byte - // boundary, we can get some extra significant - // bits, so strip them off when that happens. - if len(b) >= 2 && b[0] == 0xff && b[1]&0x80 != 0 { - b = b[1:] - } - return b - } - return nil -} - -func marshalTimestamp(info TypeInfo, value interface{}) ([]byte, error) { - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case int64: - return encBigInt(v), nil - case time.Time: - if v.IsZero() { - return []byte{}, nil - } - x := int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) - return encBigInt(x), nil - case time.Duration: - return encBigInt(v.Nanoseconds()), nil - } - - if value == nil { - return nil, nil - } - - rv := reflect.ValueOf(value) - switch rv.Type().Kind() { - case reflect.Int64: - return encBigInt(rv.Int()), nil - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func unmarshalTimestamp(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *int64: - *v = decBigInt(data) - return nil - case *time.Time: - if len(data) == 0 { - *v = time.Time{} - return nil - } - x := decBigInt(data) - sec := x / 1000 - nsec := (x - sec*1000) * 1000000 - *v = time.Unix(sec, nsec).In(time.UTC) - return nil - case *time.Duration: - *v = time.Duration(decBigInt(data)) - } - - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - switch rv.Type().Kind() { - case reflect.Int64: - rv.SetInt(decBigInt(data)) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func marshalDate(info TypeInfo, value interface{}) ([]byte, error) { - var timestamp int64 - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, nil - case int64: - timestamp = v - x := timestamp/86400000 + int64(1<<31) - return encInt(int32(x)), nil - case time.Time: - if v.IsZero() { - return []byte{}, nil - } - timestamp = int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) - x := timestamp/86400000 + int64(1<<31) - return encInt(int32(x)), nil - case *time.Time: - if v.IsZero() { - return []byte{}, nil - } - timestamp = int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) - x := timestamp/86400000 + int64(1<<31) - return encInt(int32(x)), nil - case string: - if v == "" { - return []byte{}, nil - } - t, err := time.Parse("2006-01-02", v) - if err != nil { - return nil, marshalErrorf("can not marshal %T into %s, date layout must be '2006-01-02'", value, info) - } - timestamp = int64(t.UTC().Unix()*1e3) + int64(t.UTC().Nanosecond()/1e6) - x := timestamp/86400000 + int64(1<<31) - return encInt(int32(x)), nil - } - - if value == nil { - return nil, nil - } - return nil, marshalErrorf("can not marshal %T into 
%s", value, info) -} - -func unmarshalDate(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *time.Time: - if len(data) == 0 { - *v = time.Time{} - return nil - } - var origin uint32 = 1 << 31 - var current uint32 = binary.BigEndian.Uint32(data) - timestamp := (int64(current) - int64(origin)) * 86400000 - *v = time.Unix(0, timestamp*int64(time.Millisecond)).In(time.UTC) - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func writeCollectionSize(info CollectionType, n int, buf *bytes.Buffer) error { - if info.proto > protoVersion2 { - if n > math.MaxInt32 { - return marshalErrorf("marshal: collection too large") - } - - buf.WriteByte(byte(n >> 24)) - buf.WriteByte(byte(n >> 16)) - buf.WriteByte(byte(n >> 8)) - buf.WriteByte(byte(n)) - } else { - if n > math.MaxUint16 { - return marshalErrorf("marshal: collection too large") - } - - buf.WriteByte(byte(n >> 8)) - buf.WriteByte(byte(n)) - } - - return nil -} - -func marshalList(info TypeInfo, value interface{}) ([]byte, error) { - listInfo, ok := info.(CollectionType) - if !ok { - return nil, marshalErrorf("marshal: can not marshal non collection type into list") - } - - if value == nil { - return nil, nil - } else if _, ok := value.(unsetColumn); ok { - return nil, nil - } - - rv := reflect.ValueOf(value) - t := rv.Type() - k := t.Kind() - if k == reflect.Slice && rv.IsNil() { - return nil, nil - } - - switch k { - case reflect.Slice, reflect.Array: - buf := &bytes.Buffer{} - n := rv.Len() - - if err := writeCollectionSize(listInfo, n, buf); err != nil { - return nil, err - } - - for i := 0; i < n; i++ { - item, err := Marshal(listInfo.Elem, rv.Index(i).Interface()) - if err != nil { - return nil, err - } - if err := writeCollectionSize(listInfo, len(item), buf); err != nil { - return nil, err - } - buf.Write(item) - } - return buf.Bytes(), nil - case reflect.Map: - elem := t.Elem() - if elem.Kind() == reflect.Struct && elem.NumField() == 0 { - rkeys := rv.MapKeys() - keys := make([]interface{}, len(rkeys)) - for i := 0; i < len(keys); i++ { - keys[i] = rkeys[i].Interface() - } - return marshalList(listInfo, keys) - } - } - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func readCollectionSize(info CollectionType, data []byte) (size, read int) { - if info.proto > protoVersion2 { - size = int(data[0])<<24 | int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - read = 4 - } else { - size = int(data[0])<<8 | int(data[1]) - read = 2 - } - return -} - -func unmarshalList(info TypeInfo, data []byte, value interface{}) error { - listInfo, ok := info.(CollectionType) - if !ok { - return unmarshalErrorf("unmarshal: can not unmarshal none collection type into list") - } - - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - t := rv.Type() - k := t.Kind() - - switch k { - case reflect.Slice, reflect.Array: - if data == nil { - if k == reflect.Array { - return unmarshalErrorf("unmarshal list: can not store nil in array value") - } - if rv.IsNil() { - return nil - } - rv.Set(reflect.Zero(t)) - return nil - } - if len(data) < 2 { - return unmarshalErrorf("unmarshal list: unexpected eof") - } - n, p := readCollectionSize(listInfo, data) - data = data[p:] - if k == reflect.Array { - if rv.Len() != n { - return unmarshalErrorf("unmarshal list: array with wrong size") - } - } else { - 
rv.Set(reflect.MakeSlice(t, n, n)) - } - for i := 0; i < n; i++ { - if len(data) < 2 { - return unmarshalErrorf("unmarshal list: unexpected eof") - } - m, p := readCollectionSize(listInfo, data) - data = data[p:] - if err := Unmarshal(listInfo.Elem, data[:m], rv.Index(i).Addr().Interface()); err != nil { - return err - } - data = data[m:] - } - return nil - } - return unmarshalErrorf("can not unmarshal %s into %T", info, value) -} - -func marshalMap(info TypeInfo, value interface{}) ([]byte, error) { - mapInfo, ok := info.(CollectionType) - if !ok { - return nil, marshalErrorf("marshal: can not marshal none collection type into map") - } - - if value == nil { - return nil, nil - } else if _, ok := value.(unsetColumn); ok { - return nil, nil - } - - rv := reflect.ValueOf(value) - if rv.IsNil() { - return nil, nil - } - - t := rv.Type() - if t.Kind() != reflect.Map { - return nil, marshalErrorf("can not marshal %T into %s", value, info) - } - - buf := &bytes.Buffer{} - n := rv.Len() - - if err := writeCollectionSize(mapInfo, n, buf); err != nil { - return nil, err - } - - keys := rv.MapKeys() - for _, key := range keys { - item, err := Marshal(mapInfo.Key, key.Interface()) - if err != nil { - return nil, err - } - if err := writeCollectionSize(mapInfo, len(item), buf); err != nil { - return nil, err - } - buf.Write(item) - - item, err = Marshal(mapInfo.Elem, rv.MapIndex(key).Interface()) - if err != nil { - return nil, err - } - if err := writeCollectionSize(mapInfo, len(item), buf); err != nil { - return nil, err - } - buf.Write(item) - } - return buf.Bytes(), nil -} - -func unmarshalMap(info TypeInfo, data []byte, value interface{}) error { - mapInfo, ok := info.(CollectionType) - if !ok { - return unmarshalErrorf("unmarshal: can not unmarshal none collection type into map") - } - - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - rv = rv.Elem() - t := rv.Type() - if t.Kind() != reflect.Map { - return unmarshalErrorf("can not unmarshal %s into %T", info, value) - } - if data == nil { - rv.Set(reflect.Zero(t)) - return nil - } - rv.Set(reflect.MakeMap(t)) - if len(data) < 2 { - return unmarshalErrorf("unmarshal map: unexpected eof") - } - n, p := readCollectionSize(mapInfo, data) - data = data[p:] - for i := 0; i < n; i++ { - if len(data) < 2 { - return unmarshalErrorf("unmarshal list: unexpected eof") - } - m, p := readCollectionSize(mapInfo, data) - data = data[p:] - key := reflect.New(t.Key()) - if err := Unmarshal(mapInfo.Key, data[:m], key.Interface()); err != nil { - return err - } - data = data[m:] - - m, p = readCollectionSize(mapInfo, data) - data = data[p:] - val := reflect.New(t.Elem()) - if err := Unmarshal(mapInfo.Elem, data[:m], val.Interface()); err != nil { - return err - } - data = data[m:] - - rv.SetMapIndex(key.Elem(), val.Elem()) - } - return nil -} - -func marshalUUID(info TypeInfo, value interface{}) ([]byte, error) { - switch val := value.(type) { - case unsetColumn: - return nil, nil - case UUID: - return val.Bytes(), nil - case []byte: - if len(val) != 16 { - return nil, marshalErrorf("can not marshal []byte %d bytes long into %s, must be exactly 16 bytes long", len(val), info) - } - return val, nil - case string: - b, err := ParseUUID(val) - if err != nil { - return nil, err - } - return b[:], nil - } - - if value == nil { - return nil, nil - } - - return nil, marshalErrorf("can not marshal %T into %s", value, info) -} - -func unmarshalUUID(info TypeInfo, data []byte, value 
interface{}) error { - if data == nil || len(data) == 0 { - switch v := value.(type) { - case *string: - *v = "" - case *[]byte: - *v = nil - case *UUID: - *v = UUID{} - default: - return unmarshalErrorf("can not unmarshal X %s into %T", info, value) - } - - return nil - } - - u, err := UUIDFromBytes(data) - if err != nil { - return unmarshalErrorf("Unable to parse UUID: %s", err) - } - - switch v := value.(type) { - case *string: - *v = u.String() - return nil - case *[]byte: - *v = u[:] - return nil - case *UUID: - *v = u - return nil - } - return unmarshalErrorf("can not unmarshal X %s into %T", info, value) -} - -func unmarshalTimeUUID(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *time.Time: - id, err := UUIDFromBytes(data) - if err != nil { - return err - } else if id.Version() != 1 { - return unmarshalErrorf("invalid timeuuid") - } - *v = id.Time() - return nil - default: - return unmarshalUUID(info, data, value) - } -} - -func marshalInet(info TypeInfo, value interface{}) ([]byte, error) { - // we return either the 4 or 16 byte representation of an - // ip address here otherwise the db value will be prefixed - // with the remaining byte values e.g. ::ffff:127.0.0.1 and not 127.0.0.1 - switch val := value.(type) { - case unsetColumn: - return nil, nil - case net.IP: - t := val.To4() - if t == nil { - return val.To16(), nil - } - return t, nil - case string: - b := net.ParseIP(val) - if b != nil { - t := b.To4() - if t == nil { - return b.To16(), nil - } - return t, nil - } - return nil, marshalErrorf("cannot marshal. invalid ip string %s", val) - } - - if value == nil { - return nil, nil - } - - return nil, marshalErrorf("cannot marshal %T into %s", value, info) -} - -func unmarshalInet(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case *net.IP: - if x := len(data); !(x == 4 || x == 16) { - return unmarshalErrorf("cannot unmarshal %s into %T: invalid sized IP: got %d bytes not 4 or 16", info, value, x) - } - buf := copyBytes(data) - ip := net.IP(buf) - if v4 := ip.To4(); v4 != nil { - *v = v4 - return nil - } - *v = ip - return nil - case *string: - if len(data) == 0 { - *v = "" - return nil - } - ip := net.IP(data) - if v4 := ip.To4(); v4 != nil { - *v = v4.String() - return nil - } - *v = ip.String() - return nil - } - return unmarshalErrorf("cannot unmarshal %s into %T", info, value) -} - -func marshalTuple(info TypeInfo, value interface{}) ([]byte, error) { - tuple := info.(TupleTypeInfo) - switch v := value.(type) { - case unsetColumn: - return nil, unmarshalErrorf("Invalid request: UnsetValue is unsupported for tuples") - case []interface{}: - if len(v) != len(tuple.Elems) { - return nil, unmarshalErrorf("cannont marshal tuple: wrong number of elements") - } - - var buf []byte - for i, elem := range v { - data, err := Marshal(tuple.Elems[i], elem) - if err != nil { - return nil, err - } - - n := len(data) - buf = appendInt(buf, int32(n)) - buf = append(buf, data...) 
- } - - return buf, nil - } - - rv := reflect.ValueOf(value) - t := rv.Type() - k := t.Kind() - - switch k { - case reflect.Struct: - if v := t.NumField(); v != len(tuple.Elems) { - return nil, marshalErrorf("can not marshal tuple into struct %v, not enough fields have %d need %d", t, v, len(tuple.Elems)) - } - - var buf []byte - for i, elem := range tuple.Elems { - data, err := Marshal(elem, rv.Field(i).Interface()) - if err != nil { - return nil, err - } - - n := len(data) - buf = appendInt(buf, int32(n)) - buf = append(buf, data...) - } - - return buf, nil - case reflect.Slice, reflect.Array: - size := rv.Len() - if size != len(tuple.Elems) { - return nil, marshalErrorf("can not marshal tuple into %v of length %d need %d elements", k, size, len(tuple.Elems)) - } - - var buf []byte - for i, elem := range tuple.Elems { - data, err := Marshal(elem, rv.Index(i).Interface()) - if err != nil { - return nil, err - } - - n := len(data) - buf = appendInt(buf, int32(n)) - buf = append(buf, data...) - } - - return buf, nil - } - - return nil, marshalErrorf("cannot marshal %T into %s", value, tuple) -} - -func readBytes(p []byte) ([]byte, []byte) { - // TODO: really should use a framer - size := readInt(p) - p = p[4:] - if size < 0 { - return nil, p - } - return p[:size], p[size:] -} - -// currently we only support unmarshalling into a list of values; this makes it possible -// to support tuples without changing the query API. In the future this can be extended -// to allow unmarshalling into custom tuple types. -func unmarshalTuple(info TypeInfo, data []byte, value interface{}) error { - if v, ok := value.(Unmarshaler); ok { - return v.UnmarshalCQL(info, data) - } - - tuple := info.(TupleTypeInfo) - switch v := value.(type) { - case []interface{}: - for i, elem := range tuple.Elems { - // each element inside data is a [bytes] - var p []byte - p, data = readBytes(data) - - err := Unmarshal(elem, p, v[i]) - if err != nil { - return err - } - } - - return nil - } - - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - - rv = rv.Elem() - t := rv.Type() - k := t.Kind() - - switch k { - case reflect.Struct: - if v := t.NumField(); v != len(tuple.Elems) { - return unmarshalErrorf("can not unmarshal tuple into struct %v, not enough fields have %d need %d", t, v, len(tuple.Elems)) - } - - for i, elem := range tuple.Elems { - m := readInt(data) - data = data[4:] - - v := elem.New() - if err := Unmarshal(elem, data[:m], v); err != nil { - return err - } - rv.Field(i).Set(reflect.ValueOf(v).Elem()) - - data = data[m:] - } - - return nil - case reflect.Slice, reflect.Array: - if k == reflect.Array { - size := rv.Len() - if size != len(tuple.Elems) { - return unmarshalErrorf("can not unmarshal tuple into array of length %d need %d elements", size, len(tuple.Elems)) - } - } else { - rv.Set(reflect.MakeSlice(t, len(tuple.Elems), len(tuple.Elems))) - } - - for i, elem := range tuple.Elems { - m := readInt(data) - data = data[4:] - - v := elem.New() - if err := Unmarshal(elem, data[:m], v); err != nil { - return err - } - rv.Index(i).Set(reflect.ValueOf(v).Elem()) - - data = data[m:] - } - - return nil - } - - return unmarshalErrorf("cannot unmarshal %s into %T", info, value) -} - -// UDTMarshaler is an interface which should be implemented by users wishing to -// handle encoding UDT types to be sent to Cassandra. Note: due to the current implementation, -// methods defined for this interface must be value receivers, not pointer receivers.
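// A hypothetical user-side implementation, shown for illustration only: the
// struct, its fields, and the UDT field names below are invented and are not
// part of gocql. Note the value receiver, per the note above.
//
//	type coord struct{ X, Y int }
//
//	func (c coord) MarshalUDT(name string, info gocql.TypeInfo) ([]byte, error) {
//		switch name {
//		case "x":
//			return gocql.Marshal(info, c.X)
//		case "y":
//			return gocql.Marshal(info, c.Y)
//		default:
//			return nil, fmt.Errorf("unknown UDT field %q", name)
//		}
//	}
//
// UDTUnmarshaler below is the mirror image for decoding, typically implemented
// with a pointer receiver so the fields can be set.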
-type UDTMarshaler interface { - // MarshalUDT will be called for each field in the UDT returned by Cassandra; - // the implementor should marshal the type to return, for example by calling - // Marshal. - MarshalUDT(name string, info TypeInfo) ([]byte, error) -} - -// UDTUnmarshaler should be implemented by users wanting to implement custom -// UDT unmarshaling. -type UDTUnmarshaler interface { - // UnmarshalUDT will be called for each field in the UDT returned by Cassandra; - // the implementor should unmarshal the data into the value of their choosing, - // for example by calling Unmarshal. - UnmarshalUDT(name string, info TypeInfo, data []byte) error -} - -func marshalUDT(info TypeInfo, value interface{}) ([]byte, error) { - udt := info.(UDTTypeInfo) - - switch v := value.(type) { - case Marshaler: - return v.MarshalCQL(info) - case unsetColumn: - return nil, unmarshalErrorf("Invalid request: UnsetValue is unsupported for user defined types") - case UDTMarshaler: - var buf []byte - for _, e := range udt.Elements { - data, err := v.MarshalUDT(e.Name, e.Type) - if err != nil { - return nil, err - } - - buf = appendBytes(buf, data) - } - - return buf, nil - case map[string]interface{}: - var buf []byte - for _, e := range udt.Elements { - val, ok := v[e.Name] - if !ok { - continue - } - - data, err := Marshal(e.Type, val) - if err != nil { - return nil, err - } - - buf = appendBytes(buf, data) - } - - return buf, nil - } - - k := reflect.ValueOf(value) - if k.Kind() == reflect.Ptr { - if k.IsNil() { - return nil, marshalErrorf("cannot marshal %T into %s", value, info) - } - k = k.Elem() - } - - if k.Kind() != reflect.Struct || !k.IsValid() { - return nil, marshalErrorf("cannot marshal %T into %s", value, info) - } - - fields := make(map[string]reflect.Value) - t := reflect.TypeOf(value) - for i := 0; i < t.NumField(); i++ { - sf := t.Field(i) - - if tag := sf.Tag.Get("cql"); tag != "" { - fields[tag] = k.Field(i) - } - } - - var buf []byte - for _, e := range udt.Elements { - f, ok := fields[e.Name] - if !ok { - f = k.FieldByName(e.Name) - } - - var data []byte - if f.IsValid() && f.CanInterface() { - var err error - data, err = Marshal(e.Type, f.Interface()) - if err != nil { - return nil, err - } - } - - buf = appendBytes(buf, data) - } - - return buf, nil -} - -func unmarshalUDT(info TypeInfo, data []byte, value interface{}) error { - switch v := value.(type) { - case Unmarshaler: - return v.UnmarshalCQL(info, data) - case UDTUnmarshaler: - udt := info.(UDTTypeInfo) - - for _, e := range udt.Elements { - if len(data) == 0 { - return nil - } - - var p []byte - p, data = readBytes(data) - - if err := v.UnmarshalUDT(e.Name, e.Type, p); err != nil { - return err - } - } - - return nil - case *map[string]interface{}: - udt := info.(UDTTypeInfo) - - rv := reflect.ValueOf(value) - if rv.Kind() != reflect.Ptr { - return unmarshalErrorf("can not unmarshal into non-pointer %T", value) - } - - rv = rv.Elem() - t := rv.Type() - if t.Kind() != reflect.Map { - return unmarshalErrorf("can not unmarshal %s into %T", info, value) - } else if data == nil { - rv.Set(reflect.Zero(t)) - return nil - } - - rv.Set(reflect.MakeMap(t)) - m := *v - - for _, e := range udt.Elements { - if len(data) == 0 { - return nil - } - - val := reflect.New(goType(e.Type)) - - var p []byte - p, data = readBytes(data) - - if err := Unmarshal(e.Type, p, val.Interface()); err != nil { - return err - } - - m[e.Name] = val.Elem().Interface() - } - - return nil - } - - k := reflect.ValueOf(value).Elem() - if k.Kind() != 
reflect.Struct || !k.IsValid() { - return unmarshalErrorf("cannot unmarshal %s into %T", info, value) - } - - if len(data) == 0 { - if k.CanSet() { - k.Set(reflect.Zero(k.Type())) - } - - return nil - } - - t := k.Type() - fields := make(map[string]reflect.Value, t.NumField()) - for i := 0; i < t.NumField(); i++ { - sf := t.Field(i) - - if tag := sf.Tag.Get("cql"); tag != "" { - fields[tag] = k.Field(i) - } - } - - udt := info.(UDTTypeInfo) - for _, e := range udt.Elements { - if len(data) < 4 { - // UDT def does not match the column value - return nil - } - - var p []byte - p, data = readBytes(data) - - f, ok := fields[e.Name] - if !ok { - f = k.FieldByName(e.Name) - if f == emptyValue { - // skip fields which exist in the UDT but not in - // the struct passed in - continue - } - } - - if !f.IsValid() || !f.CanAddr() { - return unmarshalErrorf("cannot unmarshal %s into %T: field %v is not valid", info, value, e.Name) - } - - fk := f.Addr().Interface() - if err := Unmarshal(e.Type, p, fk); err != nil { - return err - } - } - - return nil -} - -// TypeInfo describes a Cassandra specific data type. -type TypeInfo interface { - Type() Type - Version() byte - Custom() string - - // New creates a pointer to an empty version of whatever type - // is referenced by the TypeInfo receiver - New() interface{} -} - -type NativeType struct { - proto byte - typ Type - custom string // only used for TypeCustom -} - -func NewNativeType(proto byte, typ Type, custom string) NativeType { - return NativeType{proto, typ, custom} -} - -func (t NativeType) New() interface{} { - return reflect.New(goType(t)).Interface() -} - -func (s NativeType) Type() Type { - return s.typ -} - -func (s NativeType) Version() byte { - return s.proto -} - -func (s NativeType) Custom() string { - return s.custom -} - -func (s NativeType) String() string { - switch s.typ { - case TypeCustom: - return fmt.Sprintf("%s(%s)", s.typ, s.custom) - default: - return s.typ.String() - } -} - -type CollectionType struct { - NativeType - Key TypeInfo // only used for TypeMap - Elem TypeInfo // only used for TypeMap, TypeList and TypeSet -} - -func (t CollectionType) New() interface{} { - return reflect.New(goType(t)).Interface() -} - -func (c CollectionType) String() string { - switch c.typ { - case TypeMap: - return fmt.Sprintf("%s(%s, %s)", c.typ, c.Key, c.Elem) - case TypeList, TypeSet: - return fmt.Sprintf("%s(%s)", c.typ, c.Elem) - case TypeCustom: - return fmt.Sprintf("%s(%s)", c.typ, c.custom) - default: - return c.typ.String() - } -} - -type TupleTypeInfo struct { - NativeType - Elems []TypeInfo -} - -func (t TupleTypeInfo) String() string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%s(", t.typ)) - for _, elem := range t.Elems { - buf.WriteString(fmt.Sprintf("%s, ", elem)) - } - buf.Truncate(buf.Len() - 2) - buf.WriteByte(')') - return buf.String() -} - -func (t TupleTypeInfo) New() interface{} { - return reflect.New(goType(t)).Interface() -} - -type UDTField struct { - Name string - Type TypeInfo -} - -type UDTTypeInfo struct { - NativeType - KeySpace string - Name string - Elements []UDTField -} - -func (u UDTTypeInfo) New() interface{} { - return reflect.New(goType(u)).Interface() -} - -func (u UDTTypeInfo) String() string { - buf := &bytes.Buffer{} - - fmt.Fprintf(buf, "%s.%s{", u.KeySpace, u.Name) - first := true - for _, e := range u.Elements { - if !first { - fmt.Fprint(buf, ",") - } else { - first = false - } - - fmt.Fprintf(buf, "%s=%v", e.Name, e.Type) - } - fmt.Fprint(buf, "}") - - return buf.String() -} - -// 
String returns a human readable name for the Cassandra datatype -// described by t. -// Type is the identifier of a Cassandra internal datatype. -type Type int - -const ( - TypeCustom Type = 0x0000 - TypeAscii Type = 0x0001 - TypeBigInt Type = 0x0002 - TypeBlob Type = 0x0003 - TypeBoolean Type = 0x0004 - TypeCounter Type = 0x0005 - TypeDecimal Type = 0x0006 - TypeDouble Type = 0x0007 - TypeFloat Type = 0x0008 - TypeInt Type = 0x0009 - TypeText Type = 0x000A - TypeTimestamp Type = 0x000B - TypeUUID Type = 0x000C - TypeVarchar Type = 0x000D - TypeVarint Type = 0x000E - TypeTimeUUID Type = 0x000F - TypeInet Type = 0x0010 - TypeDate Type = 0x0011 - TypeTime Type = 0x0012 - TypeSmallInt Type = 0x0013 - TypeTinyInt Type = 0x0014 - TypeList Type = 0x0020 - TypeMap Type = 0x0021 - TypeSet Type = 0x0022 - TypeUDT Type = 0x0030 - TypeTuple Type = 0x0031 -) - -// String returns the name of the identifier. -func (t Type) String() string { - switch t { - case TypeCustom: - return "custom" - case TypeAscii: - return "ascii" - case TypeBigInt: - return "bigint" - case TypeBlob: - return "blob" - case TypeBoolean: - return "boolean" - case TypeCounter: - return "counter" - case TypeDecimal: - return "decimal" - case TypeDouble: - return "double" - case TypeFloat: - return "float" - case TypeInt: - return "int" - case TypeText: - return "text" - case TypeTimestamp: - return "timestamp" - case TypeUUID: - return "uuid" - case TypeVarchar: - return "varchar" - case TypeTimeUUID: - return "timeuuid" - case TypeInet: - return "inet" - case TypeDate: - return "date" - case TypeTime: - return "time" - case TypeSmallInt: - return "smallint" - case TypeTinyInt: - return "tinyint" - case TypeList: - return "list" - case TypeMap: - return "map" - case TypeSet: - return "set" - case TypeVarint: - return "varint" - case TypeTuple: - return "tuple" - default: - return fmt.Sprintf("unknown_type_%d", t) - } -} - -type MarshalError string - -func (m MarshalError) Error() string { - return string(m) -} - -func marshalErrorf(format string, args ...interface{}) MarshalError { - return MarshalError(fmt.Sprintf(format, args...)) -} - -type UnmarshalError string - -func (m UnmarshalError) Error() string { - return string(m) -} - -func unmarshalErrorf(format string, args ...interface{}) UnmarshalError { - return UnmarshalError(fmt.Sprintf(format, args...)) -} diff --git a/vendor/github.com/gocql/gocql/metadata.go b/vendor/github.com/gocql/gocql/metadata.go deleted file mode 100644 index 45c11dfa56..0000000000 --- a/vendor/github.com/gocql/gocql/metadata.go +++ /dev/null @@ -1,1092 +0,0 @@ -// Copyright (c) 2015 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "encoding/hex" - "encoding/json" - "fmt" - "strconv" - "strings" - "sync" -) - -// schema metadata for a keyspace -type KeyspaceMetadata struct { - Name string - DurableWrites bool - StrategyClass string - StrategyOptions map[string]interface{} - Tables map[string]*TableMetadata -} - -// schema metadata for a table (a.k.a. 
column family) -type TableMetadata struct { - Keyspace string - Name string - KeyValidator string - Comparator string - DefaultValidator string - KeyAliases []string - ColumnAliases []string - ValueAlias string - PartitionKey []*ColumnMetadata - ClusteringColumns []*ColumnMetadata - Columns map[string]*ColumnMetadata - OrderedColumns []string -} - -// schema metadata for a column -type ColumnMetadata struct { - Keyspace string - Table string - Name string - ComponentIndex int - Kind ColumnKind - Validator string - Type TypeInfo - ClusteringOrder string - Order ColumnOrder - Index ColumnIndexMetadata -} - -// the ordering of the column with regard to its comparator -type ColumnOrder bool - -const ( - ASC ColumnOrder = false - DESC = true -) - -type ColumnIndexMetadata struct { - Name string - Type string - Options map[string]interface{} -} - -type ColumnKind int - -const ( - ColumnUnkownKind ColumnKind = iota - ColumnPartitionKey - ColumnClusteringKey - ColumnRegular - ColumnCompact - ColumnStatic -) - -func (c ColumnKind) String() string { - switch c { - case ColumnPartitionKey: - return "partition_key" - case ColumnClusteringKey: - return "clustering_key" - case ColumnRegular: - return "regular" - case ColumnCompact: - return "compact" - case ColumnStatic: - return "static" - default: - return fmt.Sprintf("unknown_column_%d", c) - } -} - -func (c *ColumnKind) UnmarshalCQL(typ TypeInfo, p []byte) error { - if typ.Type() != TypeVarchar { - return unmarshalErrorf("unable to marshall %s into ColumnKind, expected Varchar", typ) - } - - kind, err := columnKindFromSchema(string(p)) - if err != nil { - return err - } - *c = kind - - return nil -} - -func columnKindFromSchema(kind string) (ColumnKind, error) { - switch kind { - case "partition_key": - return ColumnPartitionKey, nil - case "clustering_key", "clustering": - return ColumnClusteringKey, nil - case "regular": - return ColumnRegular, nil - case "compact_value": - return ColumnCompact, nil - case "static": - return ColumnStatic, nil - default: - return -1, fmt.Errorf("unknown column kind: %q", kind) - } -} - -// default alias values -const ( - DEFAULT_KEY_ALIAS = "key" - DEFAULT_COLUMN_ALIAS = "column" - DEFAULT_VALUE_ALIAS = "value" -) - -// queries the cluster for schema information for a specific keyspace -type schemaDescriber struct { - session *Session - mu sync.Mutex - - cache map[string]*KeyspaceMetadata -} - -// creates a session bound schema describer which will query and cache -// keyspace metadata -func newSchemaDescriber(session *Session) *schemaDescriber { - return &schemaDescriber{ - session: session, - cache: map[string]*KeyspaceMetadata{}, - } -} - -// returns the cached KeyspaceMetadata held by the describer for the named -// keyspace. -func (s *schemaDescriber) getSchema(keyspaceName string) (*KeyspaceMetadata, error) { - s.mu.Lock() - defer s.mu.Unlock() - - metadata, found := s.cache[keyspaceName] - if !found { - // refresh the cache for this keyspace - err := s.refreshSchema(keyspaceName) - if err != nil { - return nil, err - } - - metadata = s.cache[keyspaceName] - } - - return metadata, nil -} - -// clears the already cached keyspace metadata -func (s *schemaDescriber) clearSchema(keyspaceName string) { - s.mu.Lock() - defer s.mu.Unlock() - - delete(s.cache, keyspaceName) -} - -// forcibly updates the current KeyspaceMetadata held by the schema describer -// for a given named keyspace. 
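// As a hedged illustration (the keyspace name here is a placeholder): the
// public entry point that typically drives this describer is
// Session.KeyspaceMetadata, which serves results from the cache above.
//
//	md, err := session.KeyspaceMetadata("example_keyspace")
//	if err != nil {
//		// handle the lookup failure
//	}
//	for name := range md.Tables {
//		fmt.Println("table:", name)
//	}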
-func (s *schemaDescriber) refreshSchema(keyspaceName string) error { - var err error - - // query the system keyspace for schema data - // TODO retrieve concurrently - keyspace, err := getKeyspaceMetadata(s.session, keyspaceName) - if err != nil { - return err - } - tables, err := getTableMetadata(s.session, keyspaceName) - if err != nil { - return err - } - columns, err := getColumnMetadata(s.session, keyspaceName) - if err != nil { - return err - } - - // organize the schema data - compileMetadata(s.session.cfg.ProtoVersion, keyspace, tables, columns) - - // update the cache - s.cache[keyspaceName] = keyspace - - return nil -} - -// "compiles" derived information about keyspace, table, and column metadata -// for a keyspace from the basic queried metadata objects returned by -// getKeyspaceMetadata, getTableMetadata, and getColumnMetadata respectively; -// Links the metadata objects together and derives the column composition of -// the partition key and clustering key for a table. -func compileMetadata( - protoVersion int, - keyspace *KeyspaceMetadata, - tables []TableMetadata, - columns []ColumnMetadata, -) { - keyspace.Tables = make(map[string]*TableMetadata) - for i := range tables { - tables[i].Columns = make(map[string]*ColumnMetadata) - - keyspace.Tables[tables[i].Name] = &tables[i] - } - - // add columns from the schema data - for i := range columns { - col := &columns[i] - // decode the validator for TypeInfo and order - if col.ClusteringOrder != "" { // Cassandra 3.x+ - col.Type = getCassandraType(col.Validator) - col.Order = ASC - if col.ClusteringOrder == "desc" { - col.Order = DESC - } - } else { - validatorParsed := parseType(col.Validator) - col.Type = validatorParsed.types[0] - col.Order = ASC - if validatorParsed.reversed[0] { - col.Order = DESC - } - } - - table := keyspace.Tables[col.Table] - table.Columns[col.Name] = col - table.OrderedColumns = append(table.OrderedColumns, col.Name) - } - - if protoVersion == protoVersion1 { - compileV1Metadata(tables) - } else { - compileV2Metadata(tables) - } -} - -// Compiles derived information from TableMetadata which have had -// ColumnMetadata added already. V1 protocol does not return as much -// column metadata as V2+ (because V1 doesn't support the "type" column in the -// system.schema_columns table) so determining PartitionKey and ClusterColumns -// is more complex. 
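// A worked illustration of the alias fallback implemented below, with invented
// values: for a composite key validator with two components and
// KeyAliases == []string{"id"}, the derived partition key columns are named
// "id" and "key2" (DEFAULT_KEY_ALIAS plus the 1-based component number).
// Likewise, two clustering components with an empty ColumnAliases become
// "column" and "column2", and the compact value column, when present, is named
// DEFAULT_VALUE_ALIAS ("value") unless the table defines a ValueAlias.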
-func compileV1Metadata(tables []TableMetadata) { - for i := range tables { - table := &tables[i] - - // decode the key validator - keyValidatorParsed := parseType(table.KeyValidator) - // decode the comparator - comparatorParsed := parseType(table.Comparator) - - // the partition key length is the same as the number of types in the - // key validator - table.PartitionKey = make([]*ColumnMetadata, len(keyValidatorParsed.types)) - - // V1 protocol only returns "regular" columns from - // system.schema_columns (there is no type field for columns) - // so the alias information is used to - // create the partition key and clustering columns - - // construct the partition key from the alias - for i := range table.PartitionKey { - var alias string - if len(table.KeyAliases) > i { - alias = table.KeyAliases[i] - } else if i == 0 { - alias = DEFAULT_KEY_ALIAS - } else { - alias = DEFAULT_KEY_ALIAS + strconv.Itoa(i+1) - } - - column := &ColumnMetadata{ - Keyspace: table.Keyspace, - Table: table.Name, - Name: alias, - Type: keyValidatorParsed.types[i], - Kind: ColumnPartitionKey, - ComponentIndex: i, - } - - table.PartitionKey[i] = column - table.Columns[alias] = column - } - - // determine the number of clustering columns - size := len(comparatorParsed.types) - if comparatorParsed.isComposite { - if len(comparatorParsed.collections) != 0 || - (len(table.ColumnAliases) == size-1 && - comparatorParsed.types[size-1].Type() == TypeVarchar) { - size = size - 1 - } - } else { - if !(len(table.ColumnAliases) != 0 || len(table.Columns) == 0) { - size = 0 - } - } - - table.ClusteringColumns = make([]*ColumnMetadata, size) - - for i := range table.ClusteringColumns { - var alias string - if len(table.ColumnAliases) > i { - alias = table.ColumnAliases[i] - } else if i == 0 { - alias = DEFAULT_COLUMN_ALIAS - } else { - alias = DEFAULT_COLUMN_ALIAS + strconv.Itoa(i+1) - } - - order := ASC - if comparatorParsed.reversed[i] { - order = DESC - } - - column := &ColumnMetadata{ - Keyspace: table.Keyspace, - Table: table.Name, - Name: alias, - Type: comparatorParsed.types[i], - Order: order, - Kind: ColumnClusteringKey, - ComponentIndex: i, - } - - table.ClusteringColumns[i] = column - table.Columns[alias] = column - } - - if size != len(comparatorParsed.types)-1 { - alias := DEFAULT_VALUE_ALIAS - if len(table.ValueAlias) > 0 { - alias = table.ValueAlias - } - // decode the default validator - defaultValidatorParsed := parseType(table.DefaultValidator) - column := &ColumnMetadata{ - Keyspace: table.Keyspace, - Table: table.Name, - Name: alias, - Type: defaultValidatorParsed.types[0], - Kind: ColumnRegular, - } - table.Columns[alias] = column - } - } -} - -// The simpler compile case for V2+ protocol -func compileV2Metadata(tables []TableMetadata) { - for i := range tables { - table := &tables[i] - - clusteringColumnCount := componentColumnCountOfType(table.Columns, ColumnClusteringKey) - table.ClusteringColumns = make([]*ColumnMetadata, clusteringColumnCount) - - if table.KeyValidator != "" { - keyValidatorParsed := parseType(table.KeyValidator) - table.PartitionKey = make([]*ColumnMetadata, len(keyValidatorParsed.types)) - } else { // Cassandra 3.x+ - partitionKeyCount := componentColumnCountOfType(table.Columns, ColumnPartitionKey) - table.PartitionKey = make([]*ColumnMetadata, partitionKeyCount) - } - - for _, columnName := range table.OrderedColumns { - column := table.Columns[columnName] - if column.Kind == ColumnPartitionKey { - table.PartitionKey[column.ComponentIndex] = column - } else if column.Kind == 
ColumnClusteringKey { - table.ClusteringColumns[column.ComponentIndex] = column - } - } - } -} - -// returns the count of coluns with the given "kind" value. -func componentColumnCountOfType(columns map[string]*ColumnMetadata, kind ColumnKind) int { - maxComponentIndex := -1 - for _, column := range columns { - if column.Kind == kind && column.ComponentIndex > maxComponentIndex { - maxComponentIndex = column.ComponentIndex - } - } - return maxComponentIndex + 1 -} - -// query only for the keyspace metadata for the specified keyspace from system.schema_keyspace -func getKeyspaceMetadata(session *Session, keyspaceName string) (*KeyspaceMetadata, error) { - keyspace := &KeyspaceMetadata{Name: keyspaceName} - - if session.useSystemSchema { // Cassandra 3.x+ - const stmt = ` - SELECT durable_writes, replication - FROM system_schema.keyspaces - WHERE keyspace_name = ?` - - var replication map[string]string - - iter := session.control.query(stmt, keyspaceName) - if iter.NumRows() == 0 { - return nil, ErrKeyspaceDoesNotExist - } - iter.Scan(&keyspace.DurableWrites, &replication) - err := iter.Close() - if err != nil { - return nil, fmt.Errorf("Error querying keyspace schema: %v", err) - } - - keyspace.StrategyClass = replication["class"] - delete(replication, "class") - - keyspace.StrategyOptions = make(map[string]interface{}, len(replication)) - for k, v := range replication { - keyspace.StrategyOptions[k] = v - } - } else { - - const stmt = ` - SELECT durable_writes, strategy_class, strategy_options - FROM system.schema_keyspaces - WHERE keyspace_name = ?` - - var strategyOptionsJSON []byte - - iter := session.control.query(stmt, keyspaceName) - if iter.NumRows() == 0 { - return nil, ErrKeyspaceDoesNotExist - } - iter.Scan(&keyspace.DurableWrites, &keyspace.StrategyClass, &strategyOptionsJSON) - err := iter.Close() - if err != nil { - return nil, fmt.Errorf("Error querying keyspace schema: %v", err) - } - - err = json.Unmarshal(strategyOptionsJSON, &keyspace.StrategyOptions) - if err != nil { - return nil, fmt.Errorf( - "Invalid JSON value '%s' as strategy_options for in keyspace '%s': %v", - strategyOptionsJSON, keyspace.Name, err, - ) - } - } - - return keyspace, nil -} - -// query for only the table metadata in the specified keyspace from system.schema_columnfamilies -func getTableMetadata(session *Session, keyspaceName string) ([]TableMetadata, error) { - - var ( - iter *Iter - scan func(iter *Iter, table *TableMetadata) bool - stmt string - - keyAliasesJSON []byte - columnAliasesJSON []byte - ) - - if session.useSystemSchema { // Cassandra 3.x+ - stmt = ` - SELECT - table_name - FROM system_schema.tables - WHERE keyspace_name = ?` - - switchIter := func() *Iter { - iter.Close() - stmt = ` - SELECT - view_name - FROM system_schema.views - WHERE keyspace_name = ?` - iter = session.control.query(stmt, keyspaceName) - return iter - } - - scan = func(iter *Iter, table *TableMetadata) bool { - r := iter.Scan( - &table.Name, - ) - if !r { - iter = switchIter() - if iter != nil { - switchIter = func() *Iter { return nil } - r = iter.Scan(&table.Name) - } - } - return r - } - } else if session.cfg.ProtoVersion == protoVersion1 { - // we have key aliases - stmt = ` - SELECT - columnfamily_name, - key_validator, - comparator, - default_validator, - key_aliases, - column_aliases, - value_alias - FROM system.schema_columnfamilies - WHERE keyspace_name = ?` - - scan = func(iter *Iter, table *TableMetadata) bool { - return iter.Scan( - &table.Name, - &table.KeyValidator, - &table.Comparator, - 
&table.DefaultValidator, - &keyAliasesJSON, - &columnAliasesJSON, - &table.ValueAlias, - ) - } - } else { - stmt = ` - SELECT - columnfamily_name, - key_validator, - comparator, - default_validator - FROM system.schema_columnfamilies - WHERE keyspace_name = ?` - - scan = func(iter *Iter, table *TableMetadata) bool { - return iter.Scan( - &table.Name, - &table.KeyValidator, - &table.Comparator, - &table.DefaultValidator, - ) - } - } - - iter = session.control.query(stmt, keyspaceName) - - tables := []TableMetadata{} - table := TableMetadata{Keyspace: keyspaceName} - - for scan(iter, &table) { - var err error - - // decode the key aliases - if keyAliasesJSON != nil { - table.KeyAliases = []string{} - err = json.Unmarshal(keyAliasesJSON, &table.KeyAliases) - if err != nil { - iter.Close() - return nil, fmt.Errorf( - "Invalid JSON value '%s' as key_aliases for in table '%s': %v", - keyAliasesJSON, table.Name, err, - ) - } - } - - // decode the column aliases - if columnAliasesJSON != nil { - table.ColumnAliases = []string{} - err = json.Unmarshal(columnAliasesJSON, &table.ColumnAliases) - if err != nil { - iter.Close() - return nil, fmt.Errorf( - "Invalid JSON value '%s' as column_aliases for in table '%s': %v", - columnAliasesJSON, table.Name, err, - ) - } - } - - tables = append(tables, table) - table = TableMetadata{Keyspace: keyspaceName} - } - - err := iter.Close() - if err != nil && err != ErrNotFound { - return nil, fmt.Errorf("Error querying table schema: %v", err) - } - - return tables, nil -} - -func (s *Session) scanColumnMetadataV1(keyspace string) ([]ColumnMetadata, error) { - // V1 does not support the type column, and all returned rows are - // of kind "regular". - const stmt = ` - SELECT - columnfamily_name, - column_name, - component_index, - validator, - index_name, - index_type, - index_options - FROM system.schema_columns - WHERE keyspace_name = ?` - - var columns []ColumnMetadata - - rows := s.control.query(stmt, keyspace).Scanner() - for rows.Next() { - var ( - column = ColumnMetadata{Keyspace: keyspace} - indexOptionsJSON []byte - ) - - // all columns returned by V1 are regular - column.Kind = ColumnRegular - - err := rows.Scan(&column.Table, - &column.Name, - &column.ComponentIndex, - &column.Validator, - &column.Index.Name, - &column.Index.Type, - &indexOptionsJSON) - - if err != nil { - return nil, err - } - - if len(indexOptionsJSON) > 0 { - err := json.Unmarshal(indexOptionsJSON, &column.Index.Options) - if err != nil { - return nil, fmt.Errorf( - "Invalid JSON value '%s' as index_options for column '%s' in table '%s': %v", - indexOptionsJSON, - column.Name, - column.Table, - err) - } - } - - columns = append(columns, column) - } - - if err := rows.Err(); err != nil { - return nil, err - } - - return columns, nil -} - -func (s *Session) scanColumnMetadataV2(keyspace string) ([]ColumnMetadata, error) { - // V2+ supports the type column - const stmt = ` - SELECT - columnfamily_name, - column_name, - component_index, - validator, - index_name, - index_type, - index_options, - type - FROM system.schema_columns - WHERE keyspace_name = ?` - - var columns []ColumnMetadata - - rows := s.control.query(stmt, keyspace).Scanner() - for rows.Next() { - var ( - column = ColumnMetadata{Keyspace: keyspace} - indexOptionsJSON []byte - ) - - err := rows.Scan(&column.Table, - &column.Name, - &column.ComponentIndex, - &column.Validator, - &column.Index.Name, - &column.Index.Type, - &indexOptionsJSON, - &column.Kind, - ) - - if err != nil { - return nil, err - } - - if 
len(indexOptionsJSON) > 0 { - err := json.Unmarshal(indexOptionsJSON, &column.Index.Options) - if err != nil { - return nil, fmt.Errorf( - "Invalid JSON value '%s' as index_options for column '%s' in table '%s': %v", - indexOptionsJSON, - column.Name, - column.Table, - err) - } - } - - columns = append(columns, column) - } - - if err := rows.Err(); err != nil { - return nil, err - } - - return columns, nil - -} - -func (s *Session) scanColumnMetadataSystem(keyspace string) ([]ColumnMetadata, error) { - const stmt = ` - SELECT - table_name, - column_name, - clustering_order, - type, - kind, - position - FROM system_schema.columns - WHERE keyspace_name = ?` - - var columns []ColumnMetadata - - rows := s.control.query(stmt, keyspace).Scanner() - for rows.Next() { - column := ColumnMetadata{Keyspace: keyspace} - - err := rows.Scan(&column.Table, - &column.Name, - &column.ClusteringOrder, - &column.Validator, - &column.Kind, - &column.ComponentIndex, - ) - - if err != nil { - return nil, err - } - - columns = append(columns, column) - } - - if err := rows.Err(); err != nil { - return nil, err - } - - // TODO(zariel): get column index info from system_schema.indexes - - return columns, nil -} - -// query for only the column metadata in the specified keyspace from system.schema_columns -func getColumnMetadata(session *Session, keyspaceName string) ([]ColumnMetadata, error) { - var ( - columns []ColumnMetadata - err error - ) - - // Deal with differences in protocol versions - if session.cfg.ProtoVersion == 1 { - columns, err = session.scanColumnMetadataV1(keyspaceName) - } else if session.useSystemSchema { // Cassandra 3.x+ - columns, err = session.scanColumnMetadataSystem(keyspaceName) - } else { - columns, err = session.scanColumnMetadataV2(keyspaceName) - } - - if err != nil && err != ErrNotFound { - return nil, fmt.Errorf("Error querying column schema: %v", err) - } - - return columns, nil -} - -// type definition parser state -type typeParser struct { - input string - index int -} - -// the type definition parser result -type typeParserResult struct { - isComposite bool - types []TypeInfo - reversed []bool - collections map[string]TypeInfo -} - -// Parse the type definition used for validator and comparator schema data -func parseType(def string) typeParserResult { - parser := &typeParser{input: def} - return parser.parse() -} - -const ( - REVERSED_TYPE = "org.apache.cassandra.db.marshal.ReversedType" - COMPOSITE_TYPE = "org.apache.cassandra.db.marshal.CompositeType" - COLLECTION_TYPE = "org.apache.cassandra.db.marshal.ColumnToCollectionType" - LIST_TYPE = "org.apache.cassandra.db.marshal.ListType" - SET_TYPE = "org.apache.cassandra.db.marshal.SetType" - MAP_TYPE = "org.apache.cassandra.db.marshal.MapType" -) - -// represents a class specification in the type def AST -type typeParserClassNode struct { - name string - params []typeParserParamNode - // this is the segment of the input string that defined this node - input string -} - -// represents a class parameter in the type def AST -type typeParserParamNode struct { - name *string - class typeParserClassNode -} - -func (t *typeParser) parse() typeParserResult { - // parse the AST - ast, ok := t.parseClassNode() - if !ok { - // treat this is a custom type - return typeParserResult{ - isComposite: false, - types: []TypeInfo{ - NativeType{ - typ: TypeCustom, - custom: t.input, - }, - }, - reversed: []bool{false}, - collections: nil, - } - } - - // interpret the AST - if strings.HasPrefix(ast.name, COMPOSITE_TYPE) { - count := len(ast.params) 
- - // look for a collections param - last := ast.params[count-1] - collections := map[string]TypeInfo{} - if strings.HasPrefix(last.class.name, COLLECTION_TYPE) { - count-- - - for _, param := range last.class.params { - // decode the name - var name string - decoded, err := hex.DecodeString(*param.name) - if err != nil { - Logger.Printf( - "Error parsing type '%s', contains collection name '%s' with an invalid format: %v", - t.input, - *param.name, - err, - ) - // just use the provided name - name = *param.name - } else { - name = string(decoded) - } - collections[name] = param.class.asTypeInfo() - } - } - - types := make([]TypeInfo, count) - reversed := make([]bool, count) - - for i, param := range ast.params[:count] { - class := param.class - reversed[i] = strings.HasPrefix(class.name, REVERSED_TYPE) - if reversed[i] { - class = class.params[0].class - } - types[i] = class.asTypeInfo() - } - - return typeParserResult{ - isComposite: true, - types: types, - reversed: reversed, - collections: collections, - } - } else { - // not composite, so one type - class := *ast - reversed := strings.HasPrefix(class.name, REVERSED_TYPE) - if reversed { - class = class.params[0].class - } - typeInfo := class.asTypeInfo() - - return typeParserResult{ - isComposite: false, - types: []TypeInfo{typeInfo}, - reversed: []bool{reversed}, - } - } -} - -func (class *typeParserClassNode) asTypeInfo() TypeInfo { - if strings.HasPrefix(class.name, LIST_TYPE) { - elem := class.params[0].class.asTypeInfo() - return CollectionType{ - NativeType: NativeType{ - typ: TypeList, - }, - Elem: elem, - } - } - if strings.HasPrefix(class.name, SET_TYPE) { - elem := class.params[0].class.asTypeInfo() - return CollectionType{ - NativeType: NativeType{ - typ: TypeSet, - }, - Elem: elem, - } - } - if strings.HasPrefix(class.name, MAP_TYPE) { - key := class.params[0].class.asTypeInfo() - elem := class.params[1].class.asTypeInfo() - return CollectionType{ - NativeType: NativeType{ - typ: TypeMap, - }, - Key: key, - Elem: elem, - } - } - - // must be a simple type or custom type - info := NativeType{typ: getApacheCassandraType(class.name)} - if info.typ == TypeCustom { - // add the entire class definition - info.custom = class.input - } - return info -} - -// CLASS := ID [ PARAMS ] -func (t *typeParser) parseClassNode() (node *typeParserClassNode, ok bool) { - t.skipWhitespace() - - startIndex := t.index - - name, ok := t.nextIdentifier() - if !ok { - return nil, false - } - - params, ok := t.parseParamNodes() - if !ok { - return nil, false - } - - endIndex := t.index - - node = &typeParserClassNode{ - name: name, - params: params, - input: t.input[startIndex:endIndex], - } - return node, true -} - -// PARAMS := "(" PARAM { "," PARAM } ")" -// PARAM := [ PARAM_NAME ":" ] CLASS -// PARAM_NAME := ID -func (t *typeParser) parseParamNodes() (params []typeParserParamNode, ok bool) { - t.skipWhitespace() - - // the params are optional - if t.index == len(t.input) || t.input[t.index] != '(' { - return nil, true - } - - params = []typeParserParamNode{} - - // consume the '(' - t.index++ - - t.skipWhitespace() - - for t.input[t.index] != ')' { - // look for a named param, but if no colon, then we want to backup - backupIndex := t.index - - // name will be a hex encoded version of a utf-8 string - name, ok := t.nextIdentifier() - if !ok { - return nil, false - } - hasName := true - - // TODO handle '=>' used for DynamicCompositeType - - t.skipWhitespace() - - if t.input[t.index] == ':' { - // there is a name for this parameter - - // 
consume the ':' - t.index++ - - t.skipWhitespace() - } else { - // no name, backup - hasName = false - t.index = backupIndex - } - - // parse the next full parameter - classNode, ok := t.parseClassNode() - if !ok { - return nil, false - } - - if hasName { - params = append( - params, - typeParserParamNode{name: &name, class: *classNode}, - ) - } else { - params = append( - params, - typeParserParamNode{class: *classNode}, - ) - } - - t.skipWhitespace() - - if t.input[t.index] == ',' { - // consume the comma - t.index++ - - t.skipWhitespace() - } - } - - // consume the ')' - t.index++ - - return params, true -} - -func (t *typeParser) skipWhitespace() { - for t.index < len(t.input) && isWhitespaceChar(t.input[t.index]) { - t.index++ - } -} - -func isWhitespaceChar(c byte) bool { - return c == ' ' || c == '\n' || c == '\t' -} - -// ID := LETTER { LETTER } -// LETTER := "0"..."9" | "a"..."z" | "A"..."Z" | "-" | "+" | "." | "_" | "&" -func (t *typeParser) nextIdentifier() (id string, found bool) { - startIndex := t.index - for t.index < len(t.input) && isIdentifierChar(t.input[t.index]) { - t.index++ - } - if startIndex == t.index { - return "", false - } - return t.input[startIndex:t.index], true -} - -func isIdentifierChar(c byte) bool { - return (c >= '0' && c <= '9') || - (c >= 'a' && c <= 'z') || - (c >= 'A' && c <= 'Z') || - c == '-' || - c == '+' || - c == '.' || - c == '_' || - c == '&' -} diff --git a/vendor/github.com/gocql/gocql/policies.go b/vendor/github.com/gocql/gocql/policies.go deleted file mode 100644 index 10ce45b238..0000000000 --- a/vendor/github.com/gocql/gocql/policies.go +++ /dev/null @@ -1,708 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -//This file will be the future home for more policies -package gocql - -import ( - "fmt" - "math" - "math/rand" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/hailocab/go-hostpool" -) - -// cowHostList implements a copy on write host list, its equivalent type is []*HostInfo -type cowHostList struct { - list atomic.Value - mu sync.Mutex -} - -func (c *cowHostList) String() string { - return fmt.Sprintf("%+v", c.get()) -} - -func (c *cowHostList) get() []*HostInfo { - // TODO(zariel): should we replace this with []*HostInfo? 
- l, ok := c.list.Load().(*[]*HostInfo) - if !ok { - return nil - } - return *l -} - -func (c *cowHostList) set(list []*HostInfo) { - c.mu.Lock() - c.list.Store(&list) - c.mu.Unlock() -} - -// add will add a host if it not already in the list -func (c *cowHostList) add(host *HostInfo) bool { - c.mu.Lock() - l := c.get() - - if n := len(l); n == 0 { - l = []*HostInfo{host} - } else { - newL := make([]*HostInfo, n+1) - for i := 0; i < n; i++ { - if host.Equal(l[i]) { - c.mu.Unlock() - return false - } - newL[i] = l[i] - } - newL[n] = host - l = newL - } - - c.list.Store(&l) - c.mu.Unlock() - return true -} - -func (c *cowHostList) update(host *HostInfo) { - c.mu.Lock() - l := c.get() - - if len(l) == 0 { - c.mu.Unlock() - return - } - - found := false - newL := make([]*HostInfo, len(l)) - for i := range l { - if host.Equal(l[i]) { - newL[i] = host - found = true - } else { - newL[i] = l[i] - } - } - - if found { - c.list.Store(&newL) - } - - c.mu.Unlock() -} - -func (c *cowHostList) remove(ip net.IP) bool { - c.mu.Lock() - l := c.get() - size := len(l) - if size == 0 { - c.mu.Unlock() - return false - } - - found := false - newL := make([]*HostInfo, 0, size) - for i := 0; i < len(l); i++ { - if !l[i].ConnectAddress().Equal(ip) { - newL = append(newL, l[i]) - } else { - found = true - } - } - - if !found { - c.mu.Unlock() - return false - } - - newL = newL[:size-1 : size-1] - c.list.Store(&newL) - c.mu.Unlock() - - return true -} - -// RetryableQuery is an interface that represents a query or batch statement that -// exposes the correct functions for the retry policy logic to evaluate correctly. -type RetryableQuery interface { - Attempts() int - GetConsistency() Consistency -} - -// RetryPolicy interface is used by gocql to determine if a query can be attempted -// again after a retryable error has been received. The interface allows gocql -// users to implement their own logic to determine if a query can be attempted -// again. -// -// See SimpleRetryPolicy as an example of implementing and using a RetryPolicy -// interface. -type RetryPolicy interface { - Attempt(RetryableQuery) bool -} - -// SimpleRetryPolicy has simple logic for attempting a query a fixed number of times. -// -// See below for examples of usage: -// -// //Assign to the cluster -// cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: 3} -// -// //Assign to a query -// query.RetryPolicy(&gocql.SimpleRetryPolicy{NumRetries: 1}) -// -type SimpleRetryPolicy struct { - NumRetries int //Number of times to retry a query -} - -// Attempt tells gocql to attempt the query again based on query.Attempts being less -// than the NumRetries defined in the policy. 
-func (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool { - return q.Attempts() <= s.NumRetries -} - -// ExponentialBackoffRetryPolicy sleeps between attempts -type ExponentialBackoffRetryPolicy struct { - NumRetries int - Min, Max time.Duration -} - -func (e *ExponentialBackoffRetryPolicy) Attempt(q RetryableQuery) bool { - if q.Attempts() > e.NumRetries { - return false - } - time.Sleep(e.napTime(q.Attempts())) - return true -} - -func (e *ExponentialBackoffRetryPolicy) napTime(attempts int) time.Duration { - if e.Min <= 0 { - e.Min = 100 * time.Millisecond - } - if e.Max <= 0 { - e.Max = 10 * time.Second - } - minFloat := float64(e.Min) - napDuration := minFloat * math.Pow(2, float64(attempts-1)) - // add some jitter - napDuration += rand.Float64()*minFloat - (minFloat / 2) - if napDuration > float64(e.Max) { - return time.Duration(e.Max) - } - return time.Duration(napDuration) -} - -type HostStateNotifier interface { - AddHost(host *HostInfo) - RemoveHost(host *HostInfo) - HostUp(host *HostInfo) - HostDown(host *HostInfo) -} - -type KeyspaceUpdateEvent struct { - Keyspace string - Change string -} - -// HostSelectionPolicy is an interface for selecting -// the most appropriate host to execute a given query. -type HostSelectionPolicy interface { - HostStateNotifier - SetPartitioner - KeyspaceChanged(KeyspaceUpdateEvent) - Init(*Session) - IsLocal(host *HostInfo) bool - //Pick returns an iteration function over selected hosts - Pick(ExecutableQuery) NextHost -} - -// SelectedHost is an interface returned when picking a host from a host -// selection policy. -type SelectedHost interface { - Info() *HostInfo - Mark(error) -} - -type selectedHost HostInfo - -func (host *selectedHost) Info() *HostInfo { - return (*HostInfo)(host) -} - -func (host *selectedHost) Mark(err error) {} - -// NextHost is an iteration function over picked hosts -type NextHost func() SelectedHost - -// RoundRobinHostPolicy is a round-robin load balancing policy, where each host -// is tried sequentially for each query. 
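// An illustrative use, in the same style as the other policy examples in this
// file (cluster is assumed to be a *gocql.ClusterConfig):
//
//	cluster.PoolConfig.HostSelectionPolicy = gocql.RoundRobinHostPolicy()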
-func RoundRobinHostPolicy() HostSelectionPolicy { - return &roundRobinHostPolicy{} -} - -type roundRobinHostPolicy struct { - hosts cowHostList - pos uint32 - mu sync.RWMutex -} - -func (r *roundRobinHostPolicy) IsLocal(*HostInfo) bool { return true } -func (r *roundRobinHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {} -func (r *roundRobinHostPolicy) SetPartitioner(partitioner string) {} -func (r *roundRobinHostPolicy) Init(*Session) {} - -func (r *roundRobinHostPolicy) Pick(qry ExecutableQuery) NextHost { - // i is used to limit the number of attempts to find a host - // to the number of hosts known to this policy - var i int - return func() SelectedHost { - hosts := r.hosts.get() - if len(hosts) == 0 { - return nil - } - - // always increment pos to evenly distribute traffic in case of - // failures - pos := atomic.AddUint32(&r.pos, 1) - 1 - if i >= len(hosts) { - return nil - } - host := hosts[(pos)%uint32(len(hosts))] - i++ - return (*selectedHost)(host) - } -} - -func (r *roundRobinHostPolicy) AddHost(host *HostInfo) { - r.hosts.add(host) -} - -func (r *roundRobinHostPolicy) RemoveHost(host *HostInfo) { - r.hosts.remove(host.ConnectAddress()) -} - -func (r *roundRobinHostPolicy) HostUp(host *HostInfo) { - r.AddHost(host) -} - -func (r *roundRobinHostPolicy) HostDown(host *HostInfo) { - r.RemoveHost(host) -} - -func ShuffleReplicas() func(*tokenAwareHostPolicy) { - return func(t *tokenAwareHostPolicy) { - t.shuffleReplicas = true - } -} - -// TokenAwareHostPolicy is a token aware host selection policy, where hosts are -// selected based on the partition key, so queries are sent to the host which -// owns the partition. Fallback is used when routing information is not available. -func TokenAwareHostPolicy(fallback HostSelectionPolicy, opts ...func(*tokenAwareHostPolicy)) HostSelectionPolicy { - p := &tokenAwareHostPolicy{fallback: fallback} - for _, opt := range opts { - opt(p) - } - return p -} - -type keyspaceMeta struct { - replicas map[string]map[token][]*HostInfo -} - -type tokenAwareHostPolicy struct { - hosts cowHostList - mu sync.RWMutex - partitioner string - fallback HostSelectionPolicy - session *Session - - tokenRing atomic.Value // *tokenRing - keyspaces atomic.Value // *keyspaceMeta - - shuffleReplicas bool -} - -func (t *tokenAwareHostPolicy) Init(s *Session) { - t.session = s -} - -func (t *tokenAwareHostPolicy) IsLocal(host *HostInfo) bool { - return t.fallback.IsLocal(host) -} - -func (t *tokenAwareHostPolicy) KeyspaceChanged(update KeyspaceUpdateEvent) { - meta, _ := t.keyspaces.Load().(*keyspaceMeta) - var size = 1 - if meta != nil { - size = len(meta.replicas) - } - - newMeta := &keyspaceMeta{ - replicas: make(map[string]map[token][]*HostInfo, size), - } - - ks, err := t.session.KeyspaceMetadata(update.Keyspace) - if err == nil { - strat := getStrategy(ks) - tr := t.tokenRing.Load().(*tokenRing) - if tr != nil { - newMeta.replicas[update.Keyspace] = strat.replicaMap(t.hosts.get(), tr.tokens) - } - } - - if meta != nil { - for ks, replicas := range meta.replicas { - if ks != update.Keyspace { - newMeta.replicas[ks] = replicas - } - } - } - - t.keyspaces.Store(newMeta) -} - -func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.partitioner != partitioner { - t.fallback.SetPartitioner(partitioner) - t.partitioner = partitioner - - t.resetTokenRing(partitioner) - } -} - -func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) { - t.hosts.add(host) - t.fallback.AddHost(host) - - t.mu.RLock() - partitioner := 
t.partitioner - t.mu.RUnlock() - t.resetTokenRing(partitioner) -} - -func (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) { - t.hosts.remove(host.ConnectAddress()) - t.fallback.RemoveHost(host) - - t.mu.RLock() - partitioner := t.partitioner - t.mu.RUnlock() - t.resetTokenRing(partitioner) -} - -func (t *tokenAwareHostPolicy) HostUp(host *HostInfo) { - // TODO: need to avoid doing all the work on AddHost on hostup/down - // because it now expensive to calculate the replica map for each - // token - t.AddHost(host) -} - -func (t *tokenAwareHostPolicy) HostDown(host *HostInfo) { - t.RemoveHost(host) -} - -func (t *tokenAwareHostPolicy) resetTokenRing(partitioner string) { - if partitioner == "" { - // partitioner not yet set - return - } - - // create a new token ring - hosts := t.hosts.get() - tokenRing, err := newTokenRing(partitioner, hosts) - if err != nil { - Logger.Printf("Unable to update the token ring due to error: %s", err) - return - } - - // replace the token ring - t.tokenRing.Store(tokenRing) -} - -func (t *tokenAwareHostPolicy) getReplicas(keyspace string, token token) ([]*HostInfo, bool) { - meta, _ := t.keyspaces.Load().(*keyspaceMeta) - if meta == nil { - return nil, false - } - tokens, ok := meta.replicas[keyspace][token] - return tokens, ok -} - -func (t *tokenAwareHostPolicy) Pick(qry ExecutableQuery) NextHost { - if qry == nil { - return t.fallback.Pick(qry) - } - - routingKey, err := qry.GetRoutingKey() - if err != nil { - return t.fallback.Pick(qry) - } else if routingKey == nil { - return t.fallback.Pick(qry) - } - - tr, _ := t.tokenRing.Load().(*tokenRing) - if tr == nil { - return t.fallback.Pick(qry) - } - - token := tr.partitioner.Hash(routingKey) - primaryEndpoint := tr.GetHostForToken(token) - - if primaryEndpoint == nil || token == nil { - return t.fallback.Pick(qry) - } - - replicas, ok := t.getReplicas(qry.Keyspace(), token) - if !ok { - replicas = []*HostInfo{primaryEndpoint} - } else if t.shuffleReplicas { - replicas = shuffleHosts(replicas) - } - - var ( - fallbackIter NextHost - i int - ) - - used := make(map[*HostInfo]bool, len(replicas)) - return func() SelectedHost { - for i < len(replicas) { - h := replicas[i] - i++ - - if h.IsUp() && t.fallback.IsLocal(h) { - used[h] = true - return (*selectedHost)(h) - } - } - - if fallbackIter == nil { - // fallback - fallbackIter = t.fallback.Pick(qry) - } - - // filter the token aware selected hosts from the fallback hosts - for fallbackHost := fallbackIter(); fallbackHost != nil; fallbackHost = fallbackIter() { - if !used[fallbackHost.Info()] { - return fallbackHost - } - } - return nil - } -} - -// HostPoolHostPolicy is a host policy which uses the bitly/go-hostpool library -// to distribute queries between hosts and prevent sending queries to -// unresponsive hosts. When creating the host pool that is passed to the policy -// use an empty slice of hosts as the hostpool will be populated later by gocql. 
-// See below for examples of usage: -// -// // Create host selection policy using a simple host pool -// cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(hostpool.New(nil)) -// -// // Create host selection policy using an epsilon greedy pool -// cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy( -// hostpool.NewEpsilonGreedy(nil, 0, &hostpool.LinearEpsilonValueCalculator{}), -// ) -// -func HostPoolHostPolicy(hp hostpool.HostPool) HostSelectionPolicy { - return &hostPoolHostPolicy{hostMap: map[string]*HostInfo{}, hp: hp} -} - -type hostPoolHostPolicy struct { - hp hostpool.HostPool - mu sync.RWMutex - hostMap map[string]*HostInfo -} - -func (r *hostPoolHostPolicy) Init(*Session) {} -func (r *hostPoolHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {} -func (r *hostPoolHostPolicy) SetPartitioner(string) {} -func (r *hostPoolHostPolicy) IsLocal(*HostInfo) bool { return true } - -func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) { - peers := make([]string, len(hosts)) - hostMap := make(map[string]*HostInfo, len(hosts)) - - for i, host := range hosts { - ip := host.ConnectAddress().String() - peers[i] = ip - hostMap[ip] = host - } - - r.mu.Lock() - r.hp.SetHosts(peers) - r.hostMap = hostMap - r.mu.Unlock() -} - -func (r *hostPoolHostPolicy) AddHost(host *HostInfo) { - ip := host.ConnectAddress().String() - - r.mu.Lock() - defer r.mu.Unlock() - - // If the host addr is present and isn't nil return - if h, ok := r.hostMap[ip]; ok && h != nil { - return - } - // otherwise, add the host to the map - r.hostMap[ip] = host - // and construct a new peer list to give to the HostPool - hosts := make([]string, 0, len(r.hostMap)) - for addr := range r.hostMap { - hosts = append(hosts, addr) - } - - r.hp.SetHosts(hosts) -} - -func (r *hostPoolHostPolicy) RemoveHost(host *HostInfo) { - ip := host.ConnectAddress().String() - - r.mu.Lock() - defer r.mu.Unlock() - - if _, ok := r.hostMap[ip]; !ok { - return - } - - delete(r.hostMap, ip) - hosts := make([]string, 0, len(r.hostMap)) - for _, host := range r.hostMap { - hosts = append(hosts, host.ConnectAddress().String()) - } - - r.hp.SetHosts(hosts) -} - -func (r *hostPoolHostPolicy) HostUp(host *HostInfo) { - r.AddHost(host) -} - -func (r *hostPoolHostPolicy) HostDown(host *HostInfo) { - r.RemoveHost(host) -} - -func (r *hostPoolHostPolicy) Pick(qry ExecutableQuery) NextHost { - return func() SelectedHost { - r.mu.RLock() - defer r.mu.RUnlock() - - if len(r.hostMap) == 0 { - return nil - } - - hostR := r.hp.Get() - host, ok := r.hostMap[hostR.Host()] - if !ok { - return nil - } - - return selectedHostPoolHost{ - policy: r, - info: host, - hostR: hostR, - } - } -} - -// selectedHostPoolHost is a host returned by the hostPoolHostPolicy and -// implements the SelectedHost interface -type selectedHostPoolHost struct { - policy *hostPoolHostPolicy - info *HostInfo - hostR hostpool.HostPoolResponse -} - -func (host selectedHostPoolHost) Info() *HostInfo { - return host.info -} - -func (host selectedHostPoolHost) Mark(err error) { - ip := host.info.ConnectAddress().String() - - host.policy.mu.RLock() - defer host.policy.mu.RUnlock() - - if _, ok := host.policy.hostMap[ip]; !ok { - // host was removed between pick and mark - return - } - - host.hostR.Mark(err) -} - -type dcAwareRR struct { - local string - pos uint32 - mu sync.RWMutex - localHosts cowHostList - remoteHosts cowHostList -} - -// DCAwareRoundRobinPolicy is a host selection policies which will prioritize and -// return hosts which are in the local datacentre before returning 
hosts in all -// other datercentres -func DCAwareRoundRobinPolicy(localDC string) HostSelectionPolicy { - return &dcAwareRR{local: localDC} -} - -func (d *dcAwareRR) Init(*Session) {} -func (d *dcAwareRR) KeyspaceChanged(KeyspaceUpdateEvent) {} -func (d *dcAwareRR) SetPartitioner(p string) {} - -func (d *dcAwareRR) IsLocal(host *HostInfo) bool { - return host.DataCenter() == d.local -} - -func (d *dcAwareRR) AddHost(host *HostInfo) { - if host.DataCenter() == d.local { - d.localHosts.add(host) - } else { - d.remoteHosts.add(host) - } -} - -func (d *dcAwareRR) RemoveHost(host *HostInfo) { - if host.DataCenter() == d.local { - d.localHosts.remove(host.ConnectAddress()) - } else { - d.remoteHosts.remove(host.ConnectAddress()) - } -} - -func (d *dcAwareRR) HostUp(host *HostInfo) { d.AddHost(host) } -func (d *dcAwareRR) HostDown(host *HostInfo) { d.RemoveHost(host) } - -func (d *dcAwareRR) Pick(q ExecutableQuery) NextHost { - var i int - return func() SelectedHost { - var hosts []*HostInfo - localHosts := d.localHosts.get() - remoteHosts := d.remoteHosts.get() - if len(localHosts) != 0 { - hosts = localHosts - } else { - hosts = remoteHosts - } - if len(hosts) == 0 { - return nil - } - - // always increment pos to evenly distribute traffic in case of - // failures - pos := atomic.AddUint32(&d.pos, 1) - 1 - if i >= len(localHosts)+len(remoteHosts) { - return nil - } - host := hosts[(pos)%uint32(len(hosts))] - i++ - return (*selectedHost)(host) - } -} diff --git a/vendor/github.com/gocql/gocql/prepared_cache.go b/vendor/github.com/gocql/gocql/prepared_cache.go deleted file mode 100644 index 3c012a4bbc..0000000000 --- a/vendor/github.com/gocql/gocql/prepared_cache.go +++ /dev/null @@ -1,64 +0,0 @@ -package gocql - -import ( - "github.com/gocql/gocql/internal/lru" - "sync" -) - -const defaultMaxPreparedStmts = 1000 - -// preparedLRU is the prepared statement cache -type preparedLRU struct { - mu sync.Mutex - lru *lru.Cache -} - -// Max adjusts the maximum size of the cache and cleans up the oldest records if -// the new max is lower than the previous value. Not concurrency safe. -func (p *preparedLRU) max(max int) { - p.mu.Lock() - defer p.mu.Unlock() - - for p.lru.Len() > max { - p.lru.RemoveOldest() - } - p.lru.MaxEntries = max -} - -func (p *preparedLRU) clear() { - p.mu.Lock() - defer p.mu.Unlock() - - for p.lru.Len() > 0 { - p.lru.RemoveOldest() - } -} - -func (p *preparedLRU) add(key string, val *inflightPrepare) { - p.mu.Lock() - defer p.mu.Unlock() - p.lru.Add(key, val) -} - -func (p *preparedLRU) remove(key string) bool { - p.mu.Lock() - defer p.mu.Unlock() - return p.lru.Remove(key) -} - -func (p *preparedLRU) execIfMissing(key string, fn func(lru *lru.Cache) *inflightPrepare) (*inflightPrepare, bool) { - p.mu.Lock() - defer p.mu.Unlock() - - val, ok := p.lru.Get(key) - if ok { - return val.(*inflightPrepare), true - } - - return fn(p.lru), false -} - -func (p *preparedLRU) keyFor(addr, keyspace, statement string) string { - // TODO: maybe use []byte for keys? 
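-	// the key joins host address, keyspace and statement, so the same
-	// statement prepared against a different host or keyspace gets its
-	// own cache entry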
- return addr + keyspace + statement -} diff --git a/vendor/github.com/gocql/gocql/query_executor.go b/vendor/github.com/gocql/gocql/query_executor.go deleted file mode 100644 index 7211bf71c3..0000000000 --- a/vendor/github.com/gocql/gocql/query_executor.go +++ /dev/null @@ -1,74 +0,0 @@ -package gocql - -import ( - "time" -) - -type ExecutableQuery interface { - execute(conn *Conn) *Iter - attempt(keyspace string, end, start time.Time, iter *Iter) - retryPolicy() RetryPolicy - GetRoutingKey() ([]byte, error) - Keyspace() string - RetryableQuery -} - -type queryExecutor struct { - pool *policyConnPool - policy HostSelectionPolicy -} - -func (q *queryExecutor) attemptQuery(qry ExecutableQuery, conn *Conn) *Iter { - start := time.Now() - iter := qry.execute(conn) - end := time.Now() - - qry.attempt(q.pool.keyspace, end, start, iter) - - return iter -} - -func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) { - rt := qry.retryPolicy() - hostIter := q.policy.Pick(qry) - - var iter *Iter - for hostResponse := hostIter(); hostResponse != nil; hostResponse = hostIter() { - host := hostResponse.Info() - if host == nil || !host.IsUp() { - continue - } - - pool, ok := q.pool.getPool(host) - if !ok { - continue - } - - conn := pool.Pick() - if conn == nil { - continue - } - - iter = q.attemptQuery(qry, conn) - - // Update host - hostResponse.Mark(iter.err) - - // Exit for loop if the query was successful - if iter.err == nil { - iter.host = host - return iter, nil - } - - if rt == nil || !rt.Attempt(qry) { - // What do here? Should we just return an error here? - break - } - } - - if iter == nil { - return nil, ErrNoConnections - } - - return iter, nil -} diff --git a/vendor/github.com/gocql/gocql/ring.go b/vendor/github.com/gocql/gocql/ring.go deleted file mode 100644 index 856afae376..0000000000 --- a/vendor/github.com/gocql/gocql/ring.go +++ /dev/null @@ -1,152 +0,0 @@ -package gocql - -import ( - "fmt" - "net" - "sync" - "sync/atomic" -) - -type ring struct { - // endpoints are the set of endpoints which the driver will attempt to connect - // to in the case it can not reach any of its hosts. They are also used to boot - // strap the initial connection. - endpoints []*HostInfo - - // hosts are the set of all hosts in the cassandra ring that we know of - mu sync.RWMutex - hosts map[string]*HostInfo - - hostList []*HostInfo - pos uint32 - - // TODO: we should store the ring metadata here also. -} - -func (r *ring) rrHost() *HostInfo { - // TODO: should we filter hosts that get used here? These hosts will be used - // for the control connection, should we also provide an iterator? 
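-	// pos is advanced atomically on every call, so repeated calls walk
-	// hostList in round-robin order even under concurrent use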
- r.mu.RLock() - defer r.mu.RUnlock() - if len(r.hostList) == 0 { - return nil - } - - pos := int(atomic.AddUint32(&r.pos, 1) - 1) - return r.hostList[pos%len(r.hostList)] -} - -func (r *ring) getHost(ip net.IP) *HostInfo { - r.mu.RLock() - host := r.hosts[ip.String()] - r.mu.RUnlock() - return host -} - -func (r *ring) allHosts() []*HostInfo { - r.mu.RLock() - hosts := make([]*HostInfo, 0, len(r.hosts)) - for _, host := range r.hosts { - hosts = append(hosts, host) - } - r.mu.RUnlock() - return hosts -} - -func (r *ring) currentHosts() map[string]*HostInfo { - r.mu.RLock() - hosts := make(map[string]*HostInfo, len(r.hosts)) - for k, v := range r.hosts { - hosts[k] = v - } - r.mu.RUnlock() - return hosts -} - -func (r *ring) addHost(host *HostInfo) bool { - // TODO(zariel): key all host info by HostID instead of - // ip addresses - if host.invalidConnectAddr() { - panic(fmt.Sprintf("invalid host: %v", host)) - } - ip := host.ConnectAddress().String() - - r.mu.Lock() - if r.hosts == nil { - r.hosts = make(map[string]*HostInfo) - } - - _, ok := r.hosts[ip] - if !ok { - r.hostList = append(r.hostList, host) - } - - r.hosts[ip] = host - r.mu.Unlock() - return ok -} - -func (r *ring) addOrUpdate(host *HostInfo) *HostInfo { - if existingHost, ok := r.addHostIfMissing(host); ok { - existingHost.update(host) - host = existingHost - } - return host -} - -func (r *ring) addHostIfMissing(host *HostInfo) (*HostInfo, bool) { - if host.invalidConnectAddr() { - panic(fmt.Sprintf("invalid host: %v", host)) - } - ip := host.ConnectAddress().String() - - r.mu.Lock() - if r.hosts == nil { - r.hosts = make(map[string]*HostInfo) - } - - existing, ok := r.hosts[ip] - if !ok { - r.hosts[ip] = host - existing = host - r.hostList = append(r.hostList, host) - } - r.mu.Unlock() - return existing, ok -} - -func (r *ring) removeHost(ip net.IP) bool { - r.mu.Lock() - if r.hosts == nil { - r.hosts = make(map[string]*HostInfo) - } - - k := ip.String() - _, ok := r.hosts[k] - if ok { - for i, host := range r.hostList { - if host.ConnectAddress().Equal(ip) { - r.hostList = append(r.hostList[:i], r.hostList[i+1:]...) - break - } - } - } - delete(r.hosts, k) - r.mu.Unlock() - return ok -} - -type clusterMetadata struct { - mu sync.RWMutex - partitioner string -} - -func (c *clusterMetadata) setPartitioner(partitioner string) { - c.mu.Lock() - defer c.mu.Unlock() - - if c.partitioner != partitioner { - // TODO: update other things now - c.partitioner = partitioner - } -} diff --git a/vendor/github.com/gocql/gocql/session.go b/vendor/github.com/gocql/gocql/session.go deleted file mode 100644 index 1fb2e5081e..0000000000 --- a/vendor/github.com/gocql/gocql/session.go +++ /dev/null @@ -1,1730 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "strings" - "sync" - "sync/atomic" - "time" - "unicode" - - "github.com/gocql/gocql/internal/lru" -) - -// Session is the interface used by users to interact with the database. -// -// It's safe for concurrent use by multiple goroutines and a typical usage -// scenario is to have one global session object to interact with the -// whole Cassandra cluster. -// -// This type extends the Node interface by adding a convinient query builder -// and automatically sets a default consistency level on all operations -// that do not have a consistency level set. 
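-//
-// A minimal usage sketch (the contact point and keyspace are illustrative,
-// not part of this file):
-//
-//	cluster := NewCluster("192.168.1.1")
-//	cluster.Keyspace = "example"
-//	session, err := cluster.CreateSession()
-//	if err != nil {
-//		// handle the error
-//	}
-//	defer session.Close()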
-type Session struct { - cons Consistency - pageSize int - prefetch float64 - routingKeyInfoCache routingKeyInfoLRU - schemaDescriber *schemaDescriber - trace Tracer - queryObserver QueryObserver - batchObserver BatchObserver - hostSource *ringDescriber - stmtsLRU *preparedLRU - - connCfg *ConnConfig - - executor *queryExecutor - pool *policyConnPool - policy HostSelectionPolicy - - ring ring - metadata clusterMetadata - - mu sync.RWMutex - - control *controlConn - - // event handlers - nodeEvents *eventDebouncer - schemaEvents *eventDebouncer - - // ring metadata - hosts []HostInfo - useSystemSchema bool - - cfg ClusterConfig - - quit chan struct{} - - closeMu sync.RWMutex - isClosed bool -} - -var queryPool = &sync.Pool{ - New: func() interface{} { - return new(Query) - }, -} - -func addrsToHosts(addrs []string, defaultPort int) ([]*HostInfo, error) { - var hosts []*HostInfo - for _, hostport := range addrs { - resolvedHosts, err := hostInfo(hostport, defaultPort) - if err != nil { - // Try other hosts if unable to resolve DNS name - if _, ok := err.(*net.DNSError); ok { - Logger.Printf("gocql: dns error: %v\n", err) - continue - } - return nil, err - } - - hosts = append(hosts, resolvedHosts...) - } - if len(hosts) == 0 { - return nil, errors.New("failed to resolve any of the provided hostnames") - } - return hosts, nil -} - -// NewSession wraps an existing Node. -func NewSession(cfg ClusterConfig) (*Session, error) { - // Check that hosts in the ClusterConfig is not empty - if len(cfg.Hosts) < 1 { - return nil, ErrNoHosts - } - - s := &Session{ - cons: cfg.Consistency, - prefetch: 0.25, - cfg: cfg, - pageSize: cfg.PageSize, - stmtsLRU: &preparedLRU{lru: lru.New(cfg.MaxPreparedStmts)}, - quit: make(chan struct{}), - } - - s.schemaDescriber = newSchemaDescriber(s) - - s.nodeEvents = newEventDebouncer("NodeEvents", s.handleNodeEvent) - s.schemaEvents = newEventDebouncer("SchemaEvents", s.handleSchemaEvent) - - s.routingKeyInfoCache.lru = lru.New(cfg.MaxRoutingKeyInfo) - - s.hostSource = &ringDescriber{session: s} - - if cfg.PoolConfig.HostSelectionPolicy == nil { - cfg.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy() - } - s.pool = cfg.PoolConfig.buildPool(s) - - s.policy = cfg.PoolConfig.HostSelectionPolicy - s.policy.Init(s) - - s.executor = &queryExecutor{ - pool: s.pool, - policy: cfg.PoolConfig.HostSelectionPolicy, - } - - s.queryObserver = cfg.QueryObserver - s.batchObserver = cfg.BatchObserver - - //Check the TLS Config before trying to connect to anything external - connCfg, err := connConfig(&s.cfg) - if err != nil { - //TODO: Return a typed error - return nil, fmt.Errorf("gocql: unable to create session: %v", err) - } - s.connCfg = connCfg - - if err := s.init(); err != nil { - s.Close() - if err == ErrNoConnectionsStarted { - //This error used to be generated inside NewSession & returned directly - //Forward it on up to be backwards compatible - return nil, ErrNoConnectionsStarted - } else { - // TODO(zariel): dont wrap this error in fmt.Errorf, return a typed error - return nil, fmt.Errorf("gocql: unable to create session: %v", err) - } - } - - return s, nil -} - -func (s *Session) init() error { - hosts, err := addrsToHosts(s.cfg.Hosts, s.cfg.Port) - if err != nil { - return err - } - s.ring.endpoints = hosts - - if !s.cfg.disableControlConn { - s.control = createControlConn(s) - if s.cfg.ProtoVersion == 0 { - proto, err := s.control.discoverProtocol(hosts) - if err != nil { - return fmt.Errorf("unable to discover protocol version: %v", err) - } else if proto == 0 { - 
return errors.New("unable to discovery protocol version") - } - - // TODO(zariel): we really only need this in 1 place - s.cfg.ProtoVersion = proto - s.connCfg.ProtoVersion = proto - } - - if err := s.control.connect(hosts); err != nil { - return err - } - - if !s.cfg.DisableInitialHostLookup { - var partitioner string - newHosts, partitioner, err := s.hostSource.GetHosts() - if err != nil { - return err - } - s.policy.SetPartitioner(partitioner) - filteredHosts := make([]*HostInfo, 0, len(newHosts)) - for _, host := range newHosts { - if !s.cfg.filterHost(host) { - filteredHosts = append(filteredHosts, host) - } - } - hosts = append(hosts, filteredHosts...) - } - } - - hostMap := make(map[string]*HostInfo, len(hosts)) - for _, host := range hosts { - hostMap[host.ConnectAddress().String()] = host - } - - for _, host := range hostMap { - host = s.ring.addOrUpdate(host) - s.addNewNode(host) - } - - // TODO(zariel): we probably dont need this any more as we verify that we - // can connect to one of the endpoints supplied by using the control conn. - // See if there are any connections in the pool - if s.cfg.ReconnectInterval > 0 { - go s.reconnectDownedHosts(s.cfg.ReconnectInterval) - } - - // If we disable the initial host lookup, we need to still check if the - // cluster is using the newer system schema or not... however, if control - // connection is disable, we really have no choice, so we just make our - // best guess... - if !s.cfg.disableControlConn && s.cfg.DisableInitialHostLookup { - newer, _ := checkSystemSchema(s.control) - s.useSystemSchema = newer - } else { - host := s.ring.rrHost() - s.useSystemSchema = host.Version().Major >= 3 - } - - if s.pool.Size() == 0 { - return ErrNoConnectionsStarted - } - - return nil -} - -func (s *Session) reconnectDownedHosts(intv time.Duration) { - reconnectTicker := time.NewTicker(intv) - defer reconnectTicker.Stop() - - for { - select { - case <-reconnectTicker.C: - hosts := s.ring.allHosts() - - // Print session.ring for debug. - if gocqlDebug { - buf := bytes.NewBufferString("Session.ring:") - for _, h := range hosts { - buf.WriteString("[" + h.ConnectAddress().String() + ":" + h.State().String() + "]") - } - Logger.Println(buf.String()) - } - - for _, h := range hosts { - if h.IsUp() { - continue - } - s.handleNodeUp(h.ConnectAddress(), h.Port(), true) - } - case <-s.quit: - return - } - } -} - -// SetConsistency sets the default consistency level for this session. This -// setting can also be changed on a per-query basis and the default value -// is Quorum. -func (s *Session) SetConsistency(cons Consistency) { - s.mu.Lock() - s.cons = cons - s.mu.Unlock() -} - -// SetPageSize sets the default page size for this session. A value <= 0 will -// disable paging. This setting can also be changed on a per-query basis. -func (s *Session) SetPageSize(n int) { - s.mu.Lock() - s.pageSize = n - s.mu.Unlock() -} - -// SetPrefetch sets the default threshold for pre-fetching new pages. If -// there are only p*pageSize rows remaining, the next page will be requested -// automatically. This value can also be changed on a per-query basis and -// the default value is 0.25. -func (s *Session) SetPrefetch(p float64) { - s.mu.Lock() - s.prefetch = p - s.mu.Unlock() -} - -// SetTrace sets the default tracer for this session. This setting can also -// be changed on a per-query basis. -func (s *Session) SetTrace(trace Tracer) { - s.mu.Lock() - s.trace = trace - s.mu.Unlock() -} - -// Query generates a new query object for interacting with the database. 
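-// A short sketch (the statement and bind value are illustrative):
-//
-//	var name string
-//	q := session.Query(`SELECT name FROM users WHERE id = ?`, 42)
-//	if err := q.Scan(&name); err != nil {
-//		// handle the error
-//	}
-//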
-// Further details of the query may be tweaked using the resulting query -// value before the query is executed. Query is automatically prepared -// if it has not previously been executed. -func (s *Session) Query(stmt string, values ...interface{}) *Query { - s.mu.RLock() - qry := queryPool.Get().(*Query) - qry.stmt = stmt - qry.values = values - qry.cons = s.cons - qry.session = s - qry.pageSize = s.pageSize - qry.trace = s.trace - qry.observer = s.queryObserver - qry.prefetch = s.prefetch - qry.rt = s.cfg.RetryPolicy - qry.serialCons = s.cfg.SerialConsistency - qry.defaultTimestamp = s.cfg.DefaultTimestamp - s.mu.RUnlock() - return qry -} - -type QueryInfo struct { - Id []byte - Args []ColumnInfo - Rval []ColumnInfo - PKeyColumns []int -} - -// Bind generates a new query object based on the query statement passed in. -// The query is automatically prepared if it has not previously been executed. -// The binding callback allows the application to define which query argument -// values will be marshalled as part of the query execution. -// During execution, the meta data of the prepared query will be routed to the -// binding callback, which is responsible for producing the query argument values. -func (s *Session) Bind(stmt string, b func(q *QueryInfo) ([]interface{}, error)) *Query { - s.mu.RLock() - qry := &Query{stmt: stmt, binding: b, cons: s.cons, - session: s, pageSize: s.pageSize, trace: s.trace, observer: s.queryObserver, - prefetch: s.prefetch, rt: s.cfg.RetryPolicy} - s.mu.RUnlock() - return qry -} - -// Close closes all connections. The session is unusable after this -// operation. -func (s *Session) Close() { - - s.closeMu.Lock() - defer s.closeMu.Unlock() - if s.isClosed { - return - } - s.isClosed = true - - if s.pool != nil { - s.pool.Close() - } - - if s.control != nil { - s.control.close() - } - - if s.nodeEvents != nil { - s.nodeEvents.stop() - } - - if s.schemaEvents != nil { - s.schemaEvents.stop() - } - - if s.quit != nil { - close(s.quit) - } -} - -func (s *Session) Closed() bool { - s.closeMu.RLock() - closed := s.isClosed - s.closeMu.RUnlock() - return closed -} - -func (s *Session) executeQuery(qry *Query) (it *Iter) { - // fail fast - if s.Closed() { - return &Iter{err: ErrSessionClosed} - } - - iter, err := s.executor.executeQuery(qry) - if err != nil { - return &Iter{err: err} - } - if iter == nil { - panic("nil iter") - } - - return iter -} - -func (s *Session) removeHost(h *HostInfo) { - s.policy.RemoveHost(h) - s.pool.removeHost(h.ConnectAddress()) - s.ring.removeHost(h.ConnectAddress()) -} - -// KeyspaceMetadata returns the schema metadata for the keyspace specified. Returns an error if the keyspace does not exist. 
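-// For example (the keyspace name is illustrative):
-//
-//	meta, err := session.KeyspaceMetadata("example")
-//	if err != nil {
-//		// handle the error
-//	}
-//	for name := range meta.Tables {
-//		fmt.Println(name)
-//	}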
-func (s *Session) KeyspaceMetadata(keyspace string) (*KeyspaceMetadata, error) { - // fail fast - if s.Closed() { - return nil, ErrSessionClosed - } else if keyspace == "" { - return nil, ErrNoKeyspace - } - - return s.schemaDescriber.getSchema(keyspace) -} - -func (s *Session) getConn() *Conn { - hosts := s.ring.allHosts() - for _, host := range hosts { - if !host.IsUp() { - continue - } - - pool, ok := s.pool.getPool(host) - if !ok { - continue - } else if conn := pool.Pick(); conn != nil { - return conn - } - } - - return nil -} - -// returns routing key indexes and type info -func (s *Session) routingKeyInfo(ctx context.Context, stmt string) (*routingKeyInfo, error) { - s.routingKeyInfoCache.mu.Lock() - - entry, cached := s.routingKeyInfoCache.lru.Get(stmt) - if cached { - // done accessing the cache - s.routingKeyInfoCache.mu.Unlock() - // the entry is an inflight struct similar to that used by - // Conn to prepare statements - inflight := entry.(*inflightCachedEntry) - - // wait for any inflight work - inflight.wg.Wait() - - if inflight.err != nil { - return nil, inflight.err - } - - key, _ := inflight.value.(*routingKeyInfo) - - return key, nil - } - - // create a new inflight entry while the data is created - inflight := new(inflightCachedEntry) - inflight.wg.Add(1) - defer inflight.wg.Done() - s.routingKeyInfoCache.lru.Add(stmt, inflight) - s.routingKeyInfoCache.mu.Unlock() - - var ( - info *preparedStatment - partitionKey []*ColumnMetadata - ) - - conn := s.getConn() - if conn == nil { - // TODO: better error? - inflight.err = errors.New("gocql: unable to fetch prepared info: no connection available") - return nil, inflight.err - } - - // get the query info for the statement - info, inflight.err = conn.prepareStatement(ctx, stmt, nil) - if inflight.err != nil { - // don't cache this error - s.routingKeyInfoCache.Remove(stmt) - return nil, inflight.err - } - - // TODO: it would be nice to mark hosts here but as we are not using the policies - // to fetch hosts we cant - - if info.request.colCount == 0 { - // no arguments, no routing key, and no error - return nil, nil - } - - if len(info.request.pkeyColumns) > 0 { - // proto v4 dont need to calculate primary key columns - types := make([]TypeInfo, len(info.request.pkeyColumns)) - for i, col := range info.request.pkeyColumns { - types[i] = info.request.columns[col].TypeInfo - } - - routingKeyInfo := &routingKeyInfo{ - indexes: info.request.pkeyColumns, - types: types, - } - - inflight.value = routingKeyInfo - return routingKeyInfo, nil - } - - // get the table metadata - table := info.request.columns[0].Table - - var keyspaceMetadata *KeyspaceMetadata - keyspaceMetadata, inflight.err = s.KeyspaceMetadata(info.request.columns[0].Keyspace) - if inflight.err != nil { - // don't cache this error - s.routingKeyInfoCache.Remove(stmt) - return nil, inflight.err - } - - tableMetadata, found := keyspaceMetadata.Tables[table] - if !found { - // unlikely that the statement could be prepared and the metadata for - // the table couldn't be found, but this may indicate either a bug - // in the metadata code, or that the table was just dropped. 
- inflight.err = ErrNoMetadata - // don't cache this error - s.routingKeyInfoCache.Remove(stmt) - return nil, inflight.err - } - - partitionKey = tableMetadata.PartitionKey - - size := len(partitionKey) - routingKeyInfo := &routingKeyInfo{ - indexes: make([]int, size), - types: make([]TypeInfo, size), - } - - for keyIndex, keyColumn := range partitionKey { - // set an indicator for checking if the mapping is missing - routingKeyInfo.indexes[keyIndex] = -1 - - // find the column in the query info - for argIndex, boundColumn := range info.request.columns { - if keyColumn.Name == boundColumn.Name { - // there may be many such bound columns, pick the first - routingKeyInfo.indexes[keyIndex] = argIndex - routingKeyInfo.types[keyIndex] = boundColumn.TypeInfo - break - } - } - - if routingKeyInfo.indexes[keyIndex] == -1 { - // missing a routing key column mapping - // no routing key, and no error - return nil, nil - } - } - - // cache this result - inflight.value = routingKeyInfo - - return routingKeyInfo, nil -} - -func (b *Batch) execute(conn *Conn) *Iter { - return conn.executeBatch(b) -} - -func (s *Session) executeBatch(batch *Batch) *Iter { - // fail fast - if s.Closed() { - return &Iter{err: ErrSessionClosed} - } - - // Prevent the execution of the batch if greater than the limit - // Currently batches have a limit of 65536 queries. - // https://datastax-oss.atlassian.net/browse/JAVA-229 - if batch.Size() > BatchSizeMaximum { - return &Iter{err: ErrTooManyStmts} - } - - iter, err := s.executor.executeQuery(batch) - if err != nil { - return &Iter{err: err} - } - - return iter -} - -// ExecuteBatch executes a batch operation and returns nil if successful -// otherwise an error is returned describing the failure. -func (s *Session) ExecuteBatch(batch *Batch) error { - iter := s.executeBatch(batch) - return iter.Close() -} - -// ExecuteBatchCAS executes a batch operation and returns true if successful and -// an iterator (to scan aditional rows if more than one conditional statement) -// was sent. -// Further scans on the interator must also remember to include -// the applied boolean as the first argument to *Iter.Scan -func (s *Session) ExecuteBatchCAS(batch *Batch, dest ...interface{}) (applied bool, iter *Iter, err error) { - iter = s.executeBatch(batch) - if err := iter.checkErrAndNotFound(); err != nil { - iter.Close() - return false, nil, err - } - - if len(iter.Columns()) > 1 { - dest = append([]interface{}{&applied}, dest...) - iter.Scan(dest...) - } else { - iter.Scan(&applied) - } - - return applied, iter, nil -} - -// MapExecuteBatchCAS executes a batch operation much like ExecuteBatchCAS, -// however it accepts a map rather than a list of arguments for the initial -// scan. -func (s *Session) MapExecuteBatchCAS(batch *Batch, dest map[string]interface{}) (applied bool, iter *Iter, err error) { - iter = s.executeBatch(batch) - if err := iter.checkErrAndNotFound(); err != nil { - iter.Close() - return false, nil, err - } - iter.MapScan(dest) - applied = dest["[applied]"].(bool) - delete(dest, "[applied]") - - // we usually close here, but instead of closing, just returin an error - // if MapScan failed. Although Close just returns err, using Close - // here might be confusing as we are not actually closing the iter - return applied, iter, iter.err -} - -func (s *Session) connect(host *HostInfo, errorHandler ConnErrorHandler) (*Conn, error) { - return s.dial(host.ConnectAddress(), host.Port(), s.connCfg, errorHandler) -} - -// Query represents a CQL statement that can be executed. 
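-// For example (the statement and values are illustrative):
-//
-//	err := session.Query(`INSERT INTO users (id, name) VALUES (?, ?)`,
-//		42, "alice").Consistency(Quorum).Exec()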
-type Query struct { - stmt string - values []interface{} - cons Consistency - pageSize int - routingKey []byte - routingKeyBuffer []byte - pageState []byte - prefetch float64 - trace Tracer - observer QueryObserver - session *Session - rt RetryPolicy - binding func(q *QueryInfo) ([]interface{}, error) - attempts int - totalLatency int64 - serialCons SerialConsistency - defaultTimestamp bool - defaultTimestampValue int64 - disableSkipMetadata bool - context context.Context - - disableAutoPage bool -} - -// String implements the stringer interface. -func (q Query) String() string { - return fmt.Sprintf("[query statement=%q values=%+v consistency=%s]", q.stmt, q.values, q.cons) -} - -//Attempts returns the number of times the query was executed. -func (q *Query) Attempts() int { - return q.attempts -} - -//Latency returns the average amount of nanoseconds per attempt of the query. -func (q *Query) Latency() int64 { - if q.attempts > 0 { - return q.totalLatency / int64(q.attempts) - } - return 0 -} - -// Consistency sets the consistency level for this query. If no consistency -// level have been set, the default consistency level of the cluster -// is used. -func (q *Query) Consistency(c Consistency) *Query { - q.cons = c - return q -} - -// GetConsistency returns the currently configured consistency level for -// the query. -func (q *Query) GetConsistency() Consistency { - return q.cons -} - -// Trace enables tracing of this query. Look at the documentation of the -// Tracer interface to learn more about tracing. -func (q *Query) Trace(trace Tracer) *Query { - q.trace = trace - return q -} - -// Observer enables query-level observer on this query. -// The provided observer will be called every time this query is executed. -func (q *Query) Observer(observer QueryObserver) *Query { - q.observer = observer - return q -} - -// PageSize will tell the iterator to fetch the result in pages of size n. -// This is useful for iterating over large result sets, but setting the -// page size too low might decrease the performance. This feature is only -// available in Cassandra 2 and onwards. -func (q *Query) PageSize(n int) *Query { - q.pageSize = n - return q -} - -// DefaultTimestamp will enable the with default timestamp flag on the query. -// If enable, this will replace the server side assigned -// timestamp as default timestamp. Note that a timestamp in the query itself -// will still override this timestamp. This is entirely optional. -// -// Only available on protocol >= 3 -func (q *Query) DefaultTimestamp(enable bool) *Query { - q.defaultTimestamp = enable - return q -} - -// WithTimestamp will enable the with default timestamp flag on the query -// like DefaultTimestamp does. But also allows to define value for timestamp. -// It works the same way as USING TIMESTAMP in the query itself, but -// should not break prepared query optimization -// -// Only available on protocol >= 3 -func (q *Query) WithTimestamp(timestamp int64) *Query { - q.DefaultTimestamp(true) - q.defaultTimestampValue = timestamp - return q -} - -// RoutingKey sets the routing key to use when a token aware connection -// pool is used to optimize the routing of this query. -func (q *Query) RoutingKey(routingKey []byte) *Query { - q.routingKey = routingKey - return q -} - -// WithContext will set the context to use during a query, it will be used to -// timeout when waiting for responses from Cassandra. 
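-// For example (the timeout is illustrative):
-//
-//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-//	defer cancel()
-//	err := q.WithContext(ctx).Exec()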
-func (q *Query) WithContext(ctx context.Context) *Query { - q.context = ctx - return q -} - -func (q *Query) execute(conn *Conn) *Iter { - return conn.executeQuery(q) -} - -func (q *Query) attempt(keyspace string, end, start time.Time, iter *Iter) { - q.attempts++ - q.totalLatency += end.Sub(start).Nanoseconds() - // TODO: track latencies per host and things as well instead of just total - - if q.observer != nil { - q.observer.ObserveQuery(q.context, ObservedQuery{ - Keyspace: keyspace, - Statement: q.stmt, - Start: start, - End: end, - Rows: iter.numRows, - Err: iter.err, - }) - } -} - -func (q *Query) retryPolicy() RetryPolicy { - return q.rt -} - -// Keyspace returns the keyspace the query will be executed against. -func (q *Query) Keyspace() string { - if q.session == nil { - return "" - } - // TODO(chbannis): this should be parsed from the query or we should let - // this be set by users. - return q.session.cfg.Keyspace -} - -// GetRoutingKey gets the routing key to use for routing this query. If -// a routing key has not been explicitly set, then the routing key will -// be constructed if possible using the keyspace's schema and the query -// info for this query statement. If the routing key cannot be determined -// then nil will be returned with no error. On any error condition, -// an error description will be returned. -func (q *Query) GetRoutingKey() ([]byte, error) { - if q.routingKey != nil { - return q.routingKey, nil - } else if q.binding != nil && len(q.values) == 0 { - // If this query was created using session.Bind we wont have the query - // values yet, so we have to pass down to the next policy. - // TODO: Remove this and handle this case - return nil, nil - } - - // try to determine the routing key - routingKeyInfo, err := q.session.routingKeyInfo(q.context, q.stmt) - if err != nil { - return nil, err - } - - if routingKeyInfo == nil { - return nil, nil - } - - if len(routingKeyInfo.indexes) == 1 { - // single column routing key - routingKey, err := Marshal( - routingKeyInfo.types[0], - q.values[routingKeyInfo.indexes[0]], - ) - if err != nil { - return nil, err - } - return routingKey, nil - } - - // We allocate that buffer only once, so that further re-bind/exec of the - // same query don't allocate more memory. - if q.routingKeyBuffer == nil { - q.routingKeyBuffer = make([]byte, 0, 256) - } - - // composite routing key - buf := bytes.NewBuffer(q.routingKeyBuffer) - for i := range routingKeyInfo.indexes { - encoded, err := Marshal( - routingKeyInfo.types[i], - q.values[routingKeyInfo.indexes[i]], - ) - if err != nil { - return nil, err - } - lenBuf := []byte{0x00, 0x00} - binary.BigEndian.PutUint16(lenBuf, uint16(len(encoded))) - buf.Write(lenBuf) - buf.Write(encoded) - buf.WriteByte(0x00) - } - routingKey := buf.Bytes() - return routingKey, nil -} - -func (q *Query) shouldPrepare() bool { - - stmt := strings.TrimLeftFunc(strings.TrimRightFunc(q.stmt, func(r rune) bool { - return unicode.IsSpace(r) || r == ';' - }), unicode.IsSpace) - - var stmtType string - if n := strings.IndexFunc(stmt, unicode.IsSpace); n >= 0 { - stmtType = strings.ToLower(stmt[:n]) - } - if stmtType == "begin" { - if n := strings.LastIndexFunc(stmt, unicode.IsSpace); n >= 0 { - stmtType = strings.ToLower(stmt[n+1:]) - } - } - switch stmtType { - case "select", "insert", "update", "delete", "batch": - return true - } - return false -} - -// SetPrefetch sets the default threshold for pre-fetching new pages. 
If
-// there are only p*pageSize rows remaining, the next page will be requested
-// automatically.
-func (q *Query) Prefetch(p float64) *Query {
-	q.prefetch = p
-	return q
-}
-
-// RetryPolicy sets the policy to use when retrying the query.
-func (q *Query) RetryPolicy(r RetryPolicy) *Query {
-	q.rt = r
-	return q
-}
-
-// Bind sets the query arguments of the query. This can also be used to rebind new query arguments
-// to an existing query instance.
-func (q *Query) Bind(v ...interface{}) *Query {
-	q.values = v
-	return q
-}
-
-// SerialConsistency sets the consistency level for the
-// serial phase of conditional updates. That consistency can only be
-// either SERIAL or LOCAL_SERIAL and if not present, it defaults to
-// SERIAL. This option will be ignored for anything else than a
-// conditional update/insert.
-func (q *Query) SerialConsistency(cons SerialConsistency) *Query {
-	q.serialCons = cons
-	return q
-}
-
-// PageState sets the paging state for the query to resume paging from a specific
-// point in time. Setting this will disable automatic paging for this query, and
-// must be used for all subsequent pages.
-func (q *Query) PageState(state []byte) *Query {
-	q.pageState = state
-	q.disableAutoPage = true
-	return q
-}
-
-// NoSkipMetadata will override the internal result metadata cache so that the driver does not
-// send skip_metadata for queries, this means that the result will always contain
-// the metadata to parse the rows and will not reuse the metadata from the prepared
-// statement. This should only be used to work around cassandra bugs, such as when using
-// CAS operations which do not end in Cas.
-//
-// See https://issues.apache.org/jira/browse/CASSANDRA-11099
-// https://github.com/gocql/gocql/issues/612
-func (q *Query) NoSkipMetadata() *Query {
-	q.disableSkipMetadata = true
-	return q
-}
-
-// Exec executes the query without returning any rows.
-func (q *Query) Exec() error {
-	return q.Iter().Close()
-}
-
-func isUseStatement(stmt string) bool {
-	if len(stmt) < 3 {
-		return false
-	}
-
-	return strings.ToLower(stmt[0:3]) == "use"
-}
-
-// Iter executes the query and returns an iterator capable of iterating
-// over all results.
-func (q *Query) Iter() *Iter {
-	if isUseStatement(q.stmt) {
-		return &Iter{err: ErrUseStmt}
-	}
-	return q.session.executeQuery(q)
-}
-
-// MapScan executes the query, copies the columns of the first selected
-// row into the map pointed at by m and discards the rest. If no rows
-// were selected, ErrNotFound is returned.
-func (q *Query) MapScan(m map[string]interface{}) error {
-	iter := q.Iter()
-	if err := iter.checkErrAndNotFound(); err != nil {
-		return err
-	}
-	iter.MapScan(m)
-	return iter.Close()
-}
-
-// Scan executes the query, copies the columns of the first selected
-// row into the values pointed at by dest and discards the rest. If no rows
-// were selected, ErrNotFound is returned.
-func (q *Query) Scan(dest ...interface{}) error {
-	iter := q.Iter()
-	if err := iter.checkErrAndNotFound(); err != nil {
-		return err
-	}
-	iter.Scan(dest...)
-	return iter.Close()
-}
-
-// ScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT
-// statement containing an IF clause). If the transaction fails because
-// the existing values did not match, the previous values will be stored
-// in dest.
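-// A sketch of a conditional update (table, columns and values are illustrative):
-//
-//	var prevName string
-//	applied, err := session.Query(
-//		`UPDATE users SET name = ? WHERE id = ? IF name = ?`,
-//		"new", 42, "old").ScanCAS(&prevName)
-//	// applied == false means the IF clause did not match; prevName then
-//	// holds the existing value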
-func (q *Query) ScanCAS(dest ...interface{}) (applied bool, err error) {
-	q.disableSkipMetadata = true
-	iter := q.Iter()
-	if err := iter.checkErrAndNotFound(); err != nil {
-		return false, err
-	}
-	if len(iter.Columns()) > 1 {
-		dest = append([]interface{}{&applied}, dest...)
-		iter.Scan(dest...)
-	} else {
-		iter.Scan(&applied)
-	}
-	return applied, iter.Close()
-}
-
-// MapScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT
-// statement containing an IF clause). If the transaction fails because
-// the existing values did not match, the previous values will be stored
-// in dest map.
-//
-// As for INSERT .. IF NOT EXISTS, previous values will be returned as if
-// SELECT * FROM. So using ScanCAS with INSERT is inherently prone to
-// column mismatching. MapScanCAS is added to capture them safely.
-func (q *Query) MapScanCAS(dest map[string]interface{}) (applied bool, err error) {
-	q.disableSkipMetadata = true
-	iter := q.Iter()
-	if err := iter.checkErrAndNotFound(); err != nil {
-		return false, err
-	}
-	iter.MapScan(dest)
-	applied = dest["[applied]"].(bool)
-	delete(dest, "[applied]")
-
-	return applied, iter.Close()
-}
-
-// Release releases a query back into a pool of queries. Released Queries
-// cannot be reused.
-//
-// Example:
-//	qry := session.Query("SELECT * FROM my_table")
-//	qry.Exec()
-//	qry.Release()
-func (q *Query) Release() {
-	q.reset()
-	queryPool.Put(q)
-}
-
-// reset zeroes out all fields of a query so that it can be safely pooled.
-func (q *Query) reset() {
-	q.stmt = ""
-	q.values = nil
-	q.cons = 0
-	q.pageSize = 0
-	q.routingKey = nil
-	q.routingKeyBuffer = nil
-	q.pageState = nil
-	q.prefetch = 0
-	q.trace = nil
-	q.session = nil
-	q.rt = nil
-	q.binding = nil
-	q.attempts = 0
-	q.totalLatency = 0
-	q.serialCons = 0
-	q.defaultTimestamp = false
-	q.disableSkipMetadata = false
-	q.disableAutoPage = false
-	q.context = nil
-}
-
-// Iter represents an iterator that can be used to iterate over all rows that
-// were returned by a query. The iterator might send additional queries to the
-// database during the iteration if paging was enabled.
-type Iter struct {
-	err     error
-	pos     int
-	meta    resultMetadata
-	numRows int
-	next    *nextIter
-	host    *HostInfo
-
-	framer *framer
-	closed int32
-}
-
-// Host returns the host which the query was sent to.
-func (iter *Iter) Host() *HostInfo {
-	return iter.host
-}
-
-// Columns returns the name and type of the selected columns.
-func (iter *Iter) Columns() []ColumnInfo {
-	return iter.meta.columns
-}
-
-type Scanner interface {
-	// Next advances the row pointer to point at the next row, the row is valid until
-	// the next call of Next. It returns true if there is a row which is available to be
-	// scanned into with Scan.
-	// Next must be called before every call to Scan.
-	Next() bool
-
-	// Scan copies the current row's columns into dest. If the length of dest does not equal
-	// the number of columns returned in the row an error is returned. If an error is encountered
-	// when unmarshalling a column into the value in dest an error is returned and the row is invalidated
-	// until the next call to Next.
-	// Next must be called before calling Scan, if it is not an error is returned.
-	Scan(...interface{}) error
-
-	// Err returns the error, if any, that occurred during iteration and left the
-	// iteration unable to complete.
-	// Err also releases resources held by the iterator; the Scanner should not be
-	// used after it is called.
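-	// (iterScanner implements this by closing the underlying Iter.)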
- Err() error -} - -type iterScanner struct { - iter *Iter - cols [][]byte -} - -func (is *iterScanner) Next() bool { - iter := is.iter - if iter.err != nil { - return false - } - - if iter.pos >= iter.numRows { - if iter.next != nil { - is.iter = iter.next.fetch() - return is.Next() - } - return false - } - - cols := make([][]byte, len(iter.meta.columns)) - for i := 0; i < len(cols); i++ { - col, err := iter.readColumn() - if err != nil { - iter.err = err - return false - } - cols[i] = col - } - is.cols = cols - iter.pos++ - - return true -} - -func scanColumn(p []byte, col ColumnInfo, dest []interface{}) (int, error) { - if dest[0] == nil { - return 1, nil - } - - if col.TypeInfo.Type() == TypeTuple { - // this will panic, actually a bug, please report - tuple := col.TypeInfo.(TupleTypeInfo) - - count := len(tuple.Elems) - // here we pass in a slice of the struct which has the number number of - // values as elements in the tuple - if err := Unmarshal(col.TypeInfo, p, dest[:count]); err != nil { - return 0, err - } - return count, nil - } else { - if err := Unmarshal(col.TypeInfo, p, dest[0]); err != nil { - return 0, err - } - return 1, nil - } -} - -func (is *iterScanner) Scan(dest ...interface{}) error { - if is.cols == nil { - return errors.New("gocql: Scan called without calling Next") - } - - iter := is.iter - // currently only support scanning into an expand tuple, such that its the same - // as scanning in more values from a single column - if len(dest) != iter.meta.actualColCount { - return fmt.Errorf("gocql: not enough columns to scan into: have %d want %d", len(dest), iter.meta.actualColCount) - } - - // i is the current position in dest, could posible replace it and just use - // slices of dest - i := 0 - var err error - for _, col := range iter.meta.columns { - var n int - n, err = scanColumn(is.cols[i], col, dest[i:]) - if err != nil { - break - } - i += n - } - - is.cols = nil - - return err -} - -func (is *iterScanner) Err() error { - iter := is.iter - is.iter = nil - is.cols = nil - return iter.Close() -} - -// Scanner returns a row Scanner which provides an interface to scan rows in a manner which is -// similar to database/sql. The iter should NOT be used again after calling this method. -func (iter *Iter) Scanner() Scanner { - if iter == nil { - return nil - } - - return &iterScanner{iter: iter} -} - -func (iter *Iter) readColumn() ([]byte, error) { - return iter.framer.readBytesInternal() -} - -// Scan consumes the next row of the iterator and copies the columns of the -// current row into the values pointed at by dest. Use nil as a dest value -// to skip the corresponding column. Scan might send additional queries -// to the database to retrieve the next set of rows if paging was enabled. -// -// Scan returns true if the row was successfully unmarshaled or false if the -// end of the result set was reached or if an error occurred. Close should -// be called afterwards to retrieve any potential errors. -func (iter *Iter) Scan(dest ...interface{}) bool { - if iter.err != nil { - return false - } - - if iter.pos >= iter.numRows { - if iter.next != nil { - *iter = *iter.next.fetch() - return iter.Scan(dest...) 
- } - return false - } - - if iter.next != nil && iter.pos == iter.next.pos { - go iter.next.fetch() - } - - // currently only support scanning into an expand tuple, such that its the same - // as scanning in more values from a single column - if len(dest) != iter.meta.actualColCount { - iter.err = fmt.Errorf("gocql: not enough columns to scan into: have %d want %d", len(dest), iter.meta.actualColCount) - return false - } - - // i is the current position in dest, could posible replace it and just use - // slices of dest - i := 0 - for _, col := range iter.meta.columns { - colBytes, err := iter.readColumn() - if err != nil { - iter.err = err - return false - } - - n, err := scanColumn(colBytes, col, dest[i:]) - if err != nil { - iter.err = err - return false - } - i += n - } - - iter.pos++ - return true -} - -// GetCustomPayload returns any parsed custom payload results if given in the -// response from Cassandra. Note that the result is not a copy. -// -// This additional feature of CQL Protocol v4 -// allows additional results and query information to be returned by -// custom QueryHandlers running in your C* cluster. -// See https://datastax.github.io/java-driver/manual/custom_payloads/ -func (iter *Iter) GetCustomPayload() map[string][]byte { - return iter.framer.header.customPayload -} - -// Warnings returns any warnings generated if given in the response from Cassandra. -// -// This is only available starting with CQL Protocol v4. -func (iter *Iter) Warnings() []string { - if iter.framer != nil { - return iter.framer.header.warnings - } - return nil -} - -// Close closes the iterator and returns any errors that happened during -// the query or the iteration. -func (iter *Iter) Close() error { - if atomic.CompareAndSwapInt32(&iter.closed, 0, 1) { - if iter.framer != nil { - framerPool.Put(iter.framer) - iter.framer = nil - } - } - - return iter.err -} - -// WillSwitchPage detects if iterator reached end of current page -// and the next page is available. -func (iter *Iter) WillSwitchPage() bool { - return iter.pos >= iter.numRows && iter.next != nil -} - -// checkErrAndNotFound handle error and NotFound in one method. -func (iter *Iter) checkErrAndNotFound() error { - if iter.err != nil { - return iter.err - } else if iter.numRows == 0 { - return ErrNotFound - } - return nil -} - -// PageState return the current paging state for a query which can be used for -// subsequent quries to resume paging this point. -func (iter *Iter) PageState() []byte { - return iter.meta.pagingState -} - -// NumRows returns the number of rows in this pagination, it will update when new -// pages are fetched, it is not the value of the total number of rows this iter -// will return unless there is only a single page returned. 
-func (iter *Iter) NumRows() int { - return iter.numRows -} - -type nextIter struct { - qry Query - pos int - once sync.Once - next *Iter - conn *Conn -} - -func (n *nextIter) fetch() *Iter { - n.once.Do(func() { - iter := n.qry.session.executor.attemptQuery(&n.qry, n.conn) - if iter != nil && iter.err == nil { - n.next = iter - } else { - n.next = n.qry.session.executeQuery(&n.qry) - } - }) - return n.next -} - -type Batch struct { - Type BatchType - Entries []BatchEntry - Cons Consistency - rt RetryPolicy - observer BatchObserver - attempts int - totalLatency int64 - serialCons SerialConsistency - defaultTimestamp bool - defaultTimestampValue int64 - context context.Context - keyspace string -} - -// NewBatch creates a new batch operation without defaults from the cluster -// -// Depreicated: use session.NewBatch instead -func NewBatch(typ BatchType) *Batch { - return &Batch{Type: typ} -} - -// NewBatch creates a new batch operation using defaults defined in the cluster -func (s *Session) NewBatch(typ BatchType) *Batch { - s.mu.RLock() - batch := &Batch{ - Type: typ, - rt: s.cfg.RetryPolicy, - serialCons: s.cfg.SerialConsistency, - observer: s.batchObserver, - Cons: s.cons, - defaultTimestamp: s.cfg.DefaultTimestamp, - keyspace: s.cfg.Keyspace, - } - s.mu.RUnlock() - return batch -} - -// Observer enables batch-level observer on this batch. -// The provided observer will be called every time this batched query is executed. -func (b *Batch) Observer(observer BatchObserver) *Batch { - b.observer = observer - return b -} - -func (b *Batch) Keyspace() string { - return b.keyspace -} - -// Attempts returns the number of attempts made to execute the batch. -func (b *Batch) Attempts() int { - return b.attempts -} - -//Latency returns the average number of nanoseconds to execute a single attempt of the batch. -func (b *Batch) Latency() int64 { - if b.attempts > 0 { - return b.totalLatency / int64(b.attempts) - } - return 0 -} - -// GetConsistency returns the currently configured consistency level for the batch -// operation. -func (b *Batch) GetConsistency() Consistency { - return b.Cons -} - -// Query adds the query to the batch operation -func (b *Batch) Query(stmt string, args ...interface{}) { - b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args}) -} - -// Bind adds the query to the batch operation and correlates it with a binding callback -// that will be invoked when the batch is executed. The binding callback allows the application -// to define which query argument values will be marshalled as part of the batch execution. -func (b *Batch) Bind(stmt string, bind func(q *QueryInfo) ([]interface{}, error)) { - b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, binding: bind}) -} - -func (b *Batch) retryPolicy() RetryPolicy { - return b.rt -} - -// RetryPolicy sets the retry policy to use when executing the batch operation -func (b *Batch) RetryPolicy(r RetryPolicy) *Batch { - b.rt = r - return b -} - -// WithContext will set the context to use during a query, it will be used to -// timeout when waiting for responses from Cassandra. -func (b *Batch) WithContext(ctx context.Context) *Batch { - b.context = ctx - return b -} - -// Size returns the number of batch statements to be executed by the batch operation. -func (b *Batch) Size() int { - return len(b.Entries) -} - -// SerialConsistency sets the consistency level for the -// serial phase of conditional updates. That consistency can only be -// either SERIAL or LOCAL_SERIAL and if not present, it defaults to -// SERIAL. 
This option will be ignored for anything else that a -// conditional update/insert. -// -// Only available for protocol 3 and above -func (b *Batch) SerialConsistency(cons SerialConsistency) *Batch { - b.serialCons = cons - return b -} - -// DefaultTimestamp will enable the with default timestamp flag on the query. -// If enable, this will replace the server side assigned -// timestamp as default timestamp. Note that a timestamp in the query itself -// will still override this timestamp. This is entirely optional. -// -// Only available on protocol >= 3 -func (b *Batch) DefaultTimestamp(enable bool) *Batch { - b.defaultTimestamp = enable - return b -} - -// WithTimestamp will enable the with default timestamp flag on the query -// like DefaultTimestamp does. But also allows to define value for timestamp. -// It works the same way as USING TIMESTAMP in the query itself, but -// should not break prepared query optimization -// -// Only available on protocol >= 3 -func (b *Batch) WithTimestamp(timestamp int64) *Batch { - b.DefaultTimestamp(true) - b.defaultTimestampValue = timestamp - return b -} - -func (b *Batch) attempt(keyspace string, end, start time.Time, iter *Iter) { - b.attempts++ - b.totalLatency += end.Sub(start).Nanoseconds() - // TODO: track latencies per host and things as well instead of just total - - if b.observer == nil { - return - } - - statements := make([]string, len(b.Entries)) - for i, entry := range b.Entries { - statements[i] = entry.Stmt - } - - b.observer.ObserveBatch(b.context, ObservedBatch{ - Keyspace: keyspace, - Statements: statements, - Start: start, - End: end, - // Rows not used in batch observations // TODO - might be able to support it when using BatchCAS - Err: iter.err, - }) -} - -func (b *Batch) GetRoutingKey() ([]byte, error) { - // TODO: use the first statement in the batch as the routing key? - return nil, nil -} - -type BatchType byte - -const ( - LoggedBatch BatchType = 0 - UnloggedBatch BatchType = 1 - CounterBatch BatchType = 2 -) - -type BatchEntry struct { - Stmt string - Args []interface{} - binding func(q *QueryInfo) ([]interface{}, error) -} - -type ColumnInfo struct { - Keyspace string - Table string - Name string - TypeInfo TypeInfo -} - -func (c ColumnInfo) String() string { - return fmt.Sprintf("[column keyspace=%s table=%s name=%s type=%v]", c.Keyspace, c.Table, c.Name, c.TypeInfo) -} - -// routing key indexes LRU cache -type routingKeyInfoLRU struct { - lru *lru.Cache - mu sync.Mutex -} - -type routingKeyInfo struct { - indexes []int - types []TypeInfo -} - -func (r *routingKeyInfo) String() string { - return fmt.Sprintf("routing key index=%v types=%v", r.indexes, r.types) -} - -func (r *routingKeyInfoLRU) Remove(key string) { - r.mu.Lock() - r.lru.Remove(key) - r.mu.Unlock() -} - -//Max adjusts the maximum size of the cache and cleans up the oldest records if -//the new max is lower than the previous value. Not concurrency safe. -func (r *routingKeyInfoLRU) Max(max int) { - r.mu.Lock() - for r.lru.Len() > max { - r.lru.RemoveOldest() - } - r.lru.MaxEntries = max - r.mu.Unlock() -} - -type inflightCachedEntry struct { - wg sync.WaitGroup - err error - value interface{} -} - -// Tracer is the interface implemented by query tracers. Tracers have the -// ability to obtain a detailed event log of all events that happened during -// the execution of a query from Cassandra. 
Gathering this information might
-// be essential for debugging and optimizing queries, but this feature should
-// not be used on production systems with very high load.
-type Tracer interface {
-	Trace(traceId []byte)
-}
-
-type traceWriter struct {
-	session *Session
-	w       io.Writer
-	mu      sync.Mutex
-}
-
-// NewTraceWriter returns a simple Tracer implementation that outputs
-// the event log in a textual format.
-func NewTraceWriter(session *Session, w io.Writer) Tracer {
-	return &traceWriter{session: session, w: w}
-}
-
-func (t *traceWriter) Trace(traceId []byte) {
-	var (
-		coordinator string
-		duration    int
-	)
-	iter := t.session.control.query(`SELECT coordinator, duration
-			FROM system_traces.sessions
-			WHERE session_id = ?`, traceId)
-
-	iter.Scan(&coordinator, &duration)
-	if err := iter.Close(); err != nil {
-		t.mu.Lock()
-		fmt.Fprintln(t.w, "Error:", err)
-		t.mu.Unlock()
-		return
-	}
-
-	var (
-		timestamp time.Time
-		activity  string
-		source    string
-		elapsed   int
-	)
-
-	fmt.Fprintf(t.w, "Tracing session %016x (coordinator: %s, duration: %v):\n",
-		traceId, coordinator, time.Duration(duration)*time.Microsecond)
-
-	t.mu.Lock()
-	defer t.mu.Unlock()
-
-	iter = t.session.control.query(`SELECT event_id, activity, source, source_elapsed
-			FROM system_traces.events
-			WHERE session_id = ?`, traceId)
-
-	for iter.Scan(&timestamp, &activity, &source, &elapsed) {
-		fmt.Fprintf(t.w, "%s: %s (source: %s, elapsed: %d)\n",
-			timestamp.Format("2006/01/02 15:04:05.999999"), activity, source, elapsed)
-	}
-
-	if err := iter.Close(); err != nil {
-		fmt.Fprintln(t.w, "Error:", err)
-	}
-}
-
-type ObservedQuery struct {
-	Keyspace  string
-	Statement string
-
-	Start time.Time // time immediately before the query was called
-	End   time.Time // time immediately after the query returned
-
-	// Rows is the number of rows in the current iter.
-	// In paginated queries, rows from previous scans are not counted.
-	// Rows is not used in batch queries and remains at the default value
-	Rows int
-
-	// Err is the error in the query.
-	// It only tracks network errors or errors of bad cassandra syntax, in particular selects with no match return nil error
-	Err error
-}
-
-// QueryObserver is the interface implemented by query observers / stat collectors.
-//
-// Experimental, this interface and use may change
-type QueryObserver interface {
-	// ObserveQuery gets called on every query to cassandra, including all queries in an iterator when paging is enabled.
-	// It doesn't get called if there is no query because the session is closed or there are no connections available.
-	// The error reported only shows query errors, i.e. if a SELECT is valid but finds no matches it will be nil.
-	ObserveQuery(context.Context, ObservedQuery)
-}
-
-type ObservedBatch struct {
-	Keyspace   string
-	Statements []string
-
-	Start time.Time // time immediately before the batch query was called
-	End   time.Time // time immediately after the batch query returned
-
-	// Err is the error in the batch query.
-	// It only tracks network errors or errors of bad cassandra syntax, in particular selects with no match return nil error
-	Err error
-}
-
-// BatchObserver is the interface implemented by batch observers / stat collectors.
-type BatchObserver interface {
-	// ObserveBatch gets called on every batch query to cassandra.
-	// It also gets called once for each query in a batch.
-	// It doesn't get called if there is no query because the session is closed or there are no connections available.
- // The error reported only shows query errors, i.e. if a SELECT is valid but finds no matches it will be nil. - // Unlike QueryObserver.ObserveQuery it does no reporting on rows read. - ObserveBatch(context.Context, ObservedBatch) -} - -type Error struct { - Code int - Message string -} - -func (e Error) Error() string { - return e.Message -} - -var ( - ErrNotFound = errors.New("not found") - ErrUnavailable = errors.New("unavailable") - ErrUnsupported = errors.New("feature not supported") - ErrTooManyStmts = errors.New("too many statements") - ErrUseStmt = errors.New("use statements aren't supported. Please see https://github.com/gocql/gocql for explanation.") - ErrSessionClosed = errors.New("session has been closed") - ErrNoConnections = errors.New("gocql: no hosts available in the pool") - ErrNoKeyspace = errors.New("no keyspace provided") - ErrKeyspaceDoesNotExist = errors.New("keyspace does not exist") - ErrNoMetadata = errors.New("no metadata available") -) - -type ErrProtocol struct{ error } - -func NewErrProtocol(format string, args ...interface{}) error { - return ErrProtocol{fmt.Errorf(format, args...)} -} - -// BatchSizeMaximum is the maximum number of statements a batch operation can have. -// This limit is set by cassandra and could change in the future. -const BatchSizeMaximum = 65535 diff --git a/vendor/github.com/gocql/gocql/token.go b/vendor/github.com/gocql/gocql/token.go deleted file mode 100644 index bdfcceb98e..0000000000 --- a/vendor/github.com/gocql/gocql/token.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (c) 2015 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocql - -import ( - "bytes" - "crypto/md5" - "fmt" - "math/big" - "sort" - "strconv" - "strings" - - "github.com/gocql/gocql/internal/murmur" -) - -// a token partitioner -type partitioner interface { - Name() string - Hash([]byte) token - ParseString(string) token -} - -// a token -type token interface { - fmt.Stringer - Less(token) bool -} - -// murmur3 partitioner and token -type murmur3Partitioner struct{} -type murmur3Token int64 - -func (p murmur3Partitioner) Name() string { - return "Murmur3Partitioner" -} - -func (p murmur3Partitioner) Hash(partitionKey []byte) token { - h1 := murmur.Murmur3H1(partitionKey) - return murmur3Token(h1) -} - -// murmur3 little-endian, 128-bit hash, but returns only h1 -func (p murmur3Partitioner) ParseString(str string) token { - val, _ := strconv.ParseInt(str, 10, 64) - return murmur3Token(val) -} - -func (m murmur3Token) String() string { - return strconv.FormatInt(int64(m), 10) -} - -func (m murmur3Token) Less(token token) bool { - return m < token.(murmur3Token) -} - -// order preserving partitioner and token -type orderedPartitioner struct{} -type orderedToken string - -func (p orderedPartitioner) Name() string { - return "OrderedPartitioner" -} - -func (p orderedPartitioner) Hash(partitionKey []byte) token { - // the partition key is the token - return orderedToken(partitionKey) -} - -func (p orderedPartitioner) ParseString(str string) token { - return orderedToken(str) -} - -func (o orderedToken) String() string { - return string(o) -} - -func (o orderedToken) Less(token token) bool { - return o < token.(orderedToken) -} - -// random partitioner and token -type randomPartitioner struct{} -type randomToken big.Int - -func (r randomPartitioner) Name() string { - return "RandomPartitioner" -} - -// 2 ** 128 -var maxHashInt, _ = 
new(big.Int).SetString("340282366920938463463374607431768211456", 10) - -func (p randomPartitioner) Hash(partitionKey []byte) token { - sum := md5.Sum(partitionKey) - val := new(big.Int) - val.SetBytes(sum[:]) - if sum[0] > 127 { - val.Sub(val, maxHashInt) - val.Abs(val) - } - - return (*randomToken)(val) -} - -func (p randomPartitioner) ParseString(str string) token { - val := new(big.Int) - val.SetString(str, 10) - return (*randomToken)(val) -} - -func (r *randomToken) String() string { - return (*big.Int)(r).String() -} - -func (r *randomToken) Less(token token) bool { - return -1 == (*big.Int)(r).Cmp((*big.Int)(token.(*randomToken))) -} - -type hostToken struct { - token token - host *HostInfo -} - -func (ht hostToken) String() string { - return fmt.Sprintf("{token=%v host=%v}", ht.token, ht.host.HostID()) -} - -// a data structure for organizing the relationship between tokens and hosts -type tokenRing struct { - partitioner partitioner - tokens []hostToken -} - -func newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) { - tokenRing := &tokenRing{} - - if strings.HasSuffix(partitioner, "Murmur3Partitioner") { - tokenRing.partitioner = murmur3Partitioner{} - } else if strings.HasSuffix(partitioner, "OrderedPartitioner") { - tokenRing.partitioner = orderedPartitioner{} - } else if strings.HasSuffix(partitioner, "RandomPartitioner") { - tokenRing.partitioner = randomPartitioner{} - } else { - return nil, fmt.Errorf("Unsupported partitioner '%s'", partitioner) - } - - for _, host := range hosts { - for _, strToken := range host.Tokens() { - token := tokenRing.partitioner.ParseString(strToken) - tokenRing.tokens = append(tokenRing.tokens, hostToken{token, host}) - } - } - - sort.Sort(tokenRing) - - return tokenRing, nil -} - -func (t *tokenRing) Len() int { - return len(t.tokens) -} - -func (t *tokenRing) Less(i, j int) bool { - return t.tokens[i].token.Less(t.tokens[j].token) -} - -func (t *tokenRing) Swap(i, j int) { - t.tokens[i], t.tokens[j] = t.tokens[j], t.tokens[i] -} - -func (t *tokenRing) String() string { - buf := &bytes.Buffer{} - buf.WriteString("TokenRing(") - if t.partitioner != nil { - buf.WriteString(t.partitioner.Name()) - } - buf.WriteString("){") - sep := "" - for i, th := range t.tokens { - buf.WriteString(sep) - sep = "," - buf.WriteString("\n\t[") - buf.WriteString(strconv.Itoa(i)) - buf.WriteString("]") - buf.WriteString(th.token.String()) - buf.WriteString(":") - buf.WriteString(th.host.ConnectAddress().String()) - } - buf.WriteString("\n}") - return string(buf.Bytes()) -} - -func (t *tokenRing) GetHostForPartitionKey(partitionKey []byte) *HostInfo { - if t == nil { - return nil - } - - token := t.partitioner.Hash(partitionKey) - return t.GetHostForToken(token) -} - -func (t *tokenRing) GetHostForToken(token token) *HostInfo { - if t == nil || len(t.tokens) == 0 { - return nil - } - - // find the primary replica - ringIndex := sort.Search(len(t.tokens), func(i int) bool { - return !t.tokens[i].token.Less(token) - }) - - if ringIndex == len(t.tokens) { - // wrap around to the first in the ring - ringIndex = 0 - } - - return t.tokens[ringIndex].host -} diff --git a/vendor/github.com/gocql/gocql/topology.go b/vendor/github.com/gocql/gocql/topology.go deleted file mode 100644 index 735dc9dab3..0000000000 --- a/vendor/github.com/gocql/gocql/topology.go +++ /dev/null @@ -1,212 +0,0 @@ -package gocql - -import ( - "fmt" - "strconv" - "strings" -) - -type placementStrategy interface { - replicaMap(hosts []*HostInfo, tokens []hostToken) 
map[token][]*HostInfo - replicationFactor(dc string) int -} - -func getReplicationFactorFromOpts(keyspace string, val interface{}) int { - // TODO: don't really want to panic here, but it is better - // than spamming - switch v := val.(type) { - case int: - if v <= 0 { - panic(fmt.Sprintf("invalid replication_factor %d. Is the %q keyspace configured correctly?", v, keyspace)) - } - return v - case string: - n, err := strconv.Atoi(v) - if err != nil { - panic(fmt.Sprintf("invalid replication_factor. Is the %q keyspace configured correctly? %v", keyspace, err)) - } else if n <= 0 { - panic(fmt.Sprintf("invalid replication_factor %d. Is the %q keyspace configured correctly?", n, keyspace)) - } - return n - default: - panic(fmt.Sprintf("unknown replication_factor type %T", v)) - } -} - -func getStrategy(ks *KeyspaceMetadata) placementStrategy { - switch { - case strings.Contains(ks.StrategyClass, "SimpleStrategy"): - return &simpleStrategy{rf: getReplicationFactorFromOpts(ks.Name, ks.StrategyOptions["replication_factor"])} - case strings.Contains(ks.StrategyClass, "NetworkTopologyStrategy"): - dcs := make(map[string]int) - for dc, rf := range ks.StrategyOptions { - if dc == "class" { - continue - } - - dcs[dc] = getReplicationFactorFromOpts(ks.Name+":dc="+dc, rf) - } - return &networkTopology{dcs: dcs} - default: - // TODO: handle unknown replicas and just return the primary host for a token - panic(fmt.Sprintf("unsupported strategy class: %v", ks.StrategyClass)) - } -} - -type simpleStrategy struct { - rf int -} - -func (s *simpleStrategy) replicationFactor(dc string) int { - return s.rf -} - -func (s *simpleStrategy) replicaMap(_ []*HostInfo, tokens []hostToken) map[token][]*HostInfo { - tokenRing := make(map[token][]*HostInfo, len(tokens)) - - for i, th := range tokens { - replicas := make([]*HostInfo, 0, s.rf) - for j := 0; j < len(tokens) && len(replicas) < s.rf; j++ { - // TODO: need to ensure we don't add the same hosts twice - h := tokens[(i+j)%len(tokens)] - replicas = append(replicas, h.host) - } - tokenRing[th.token] = replicas - } - - return tokenRing -} - -type networkTopology struct { - dcs map[string]int -} - -func (n *networkTopology) replicationFactor(dc string) int { - return n.dcs[dc] -} - -func (n *networkTopology) haveRF(replicaCounts map[string]int) bool { - if len(replicaCounts) != len(n.dcs) { - return false - } - - for dc, rf := range n.dcs { - if rf != replicaCounts[dc] { - return false - } - } - - return true -} - -func (n *networkTopology) replicaMap(hosts []*HostInfo, tokens []hostToken) map[token][]*HostInfo { - dcRacks := make(map[string]map[string]struct{}) - - for _, h := range hosts { - dc := h.DataCenter() - rack := h.Rack() - - racks, ok := dcRacks[dc] - if !ok { - racks = make(map[string]struct{}) - dcRacks[dc] = racks - } - racks[rack] = struct{}{} - } - - tokenRing := make(map[token][]*HostInfo, len(tokens)) - - var totalRF int - for _, rf := range n.dcs { - totalRF += rf - } - - for i, th := range tokens { - // number of replicas per dc - // TODO: recycle these - replicasInDC := make(map[string]int, len(n.dcs)) - // dc -> racks - seenDCRacks := make(map[string]map[string]struct{}, len(n.dcs)) - // skipped hosts in a dc - skipped := make(map[string][]*HostInfo, len(n.dcs)) - - replicas := make([]*HostInfo, 0, totalRF) - for j := 0; j < len(tokens) && !n.haveRF(replicasInDC); j++ { - // TODO: ensure we don't add the same host twice - h := tokens[(i+j)%len(tokens)].host - - dc := h.DataCenter() - rack := h.Rack() - - rf, ok := n.dcs[dc] - if !ok { - // skip this
DC, dont know about it - continue - } else if replicasInDC[dc] >= rf { - if replicasInDC[dc] > rf { - panic(fmt.Sprintf("replica overflow. rf=%d have=%d in dc %q", rf, replicasInDC[dc], dc)) - } - - // have enough replicas in this DC - continue - } else if _, ok := dcRacks[dc][rack]; !ok { - // dont know about this rack - continue - } else if len(replicas) >= totalRF { - if replicasInDC[dc] > rf { - panic(fmt.Sprintf("replica overflow. total rf=%d have=%d", totalRF, len(replicas))) - } - - // we now have enough replicas - break - } - - racks := seenDCRacks[dc] - if _, ok := racks[rack]; ok && len(racks) == len(dcRacks[dc]) { - // we have been through all the racks and dont have RF yet, add this - replicas = append(replicas, h) - replicasInDC[dc]++ - } else if !ok { - if racks == nil { - racks = make(map[string]struct{}, 1) - seenDCRacks[dc] = racks - } - - // new rack - racks[rack] = struct{}{} - replicas = append(replicas, h) - replicasInDC[dc]++ - - if len(racks) == len(dcRacks[dc]) { - // if we have been through all the racks, drain the rest of the skipped - // hosts until we have RF. The next iteration will skip in the block - // above - skippedHosts := skipped[dc] - var k int - for ; k < len(skippedHosts) && replicasInDC[dc] < rf; k++ { - sh := skippedHosts[k] - replicas = append(replicas, sh) - replicasInDC[dc]++ - } - skipped[dc] = skippedHosts[k:] - } - } else { - // already seen this rack, keep hold of this host incase - // we dont get enough for rf - skipped[dc] = append(skipped[dc], h) - } - } - - if len(replicas) == 0 || replicas[0] != th.host { - panic("first replica is not the primary replica for the token") - } - - tokenRing[th.token] = replicas - } - - if len(tokenRing) != len(tokens) { - panic(fmt.Sprintf("token map different size to token ring: got %d expected %d", len(tokenRing), len(tokens))) - } - - return tokenRing -} diff --git a/vendor/github.com/gocql/gocql/uuid.go b/vendor/github.com/gocql/gocql/uuid.go deleted file mode 100644 index 7ca4c087a6..0000000000 --- a/vendor/github.com/gocql/gocql/uuid.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package can be used to generate and parse universally unique -// identifiers, a standardized format in the form of a 128 bit number. -// -// http://tools.ietf.org/html/rfc4122 -package gocql - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "net" - "strings" - "sync/atomic" - "time" -) - -type UUID [16]byte - -var hardwareAddr []byte -var clockSeq uint32 - -const ( - VariantNCSCompat = 0 - VariantIETF = 2 - VariantMicrosoft = 6 - VariantFuture = 7 -) - -func init() { - if interfaces, err := net.Interfaces(); err == nil { - for _, i := range interfaces { - if i.Flags&net.FlagLoopback == 0 && len(i.HardwareAddr) > 0 { - hardwareAddr = i.HardwareAddr - break - } - } - } - if hardwareAddr == nil { - // If we failed to obtain the MAC address of the current computer, - // we will use a randomly generated 6 byte sequence instead and set - // the multicast bit as recommended in RFC 4122. 
- hardwareAddr = make([]byte, 6) - _, err := io.ReadFull(rand.Reader, hardwareAddr) - if err != nil { - panic(err) - } - hardwareAddr[0] = hardwareAddr[0] | 0x01 - } - - // initialize the clock sequence with a random number - var clockSeqRand [2]byte - io.ReadFull(rand.Reader, clockSeqRand[:]) - clockSeq = uint32(clockSeqRand[1])<<8 | uint32(clockSeqRand[0]) -} - -// ParseUUID parses a 32 digit hexadecimal number (that might contain hyphens) -// representing a UUID. -func ParseUUID(input string) (UUID, error) { - var u UUID - j := 0 - for _, r := range input { - switch { - case r == '-' && j&1 == 0: - continue - case r >= '0' && r <= '9' && j < 32: - u[j/2] |= byte(r-'0') << uint(4-j&1*4) - case r >= 'a' && r <= 'f' && j < 32: - u[j/2] |= byte(r-'a'+10) << uint(4-j&1*4) - case r >= 'A' && r <= 'F' && j < 32: - u[j/2] |= byte(r-'A'+10) << uint(4-j&1*4) - default: - return UUID{}, fmt.Errorf("invalid UUID %q", input) - } - j += 1 - } - if j != 32 { - return UUID{}, fmt.Errorf("invalid UUID %q", input) - } - return u, nil -} - -// UUIDFromBytes converts a raw byte slice to a UUID. -func UUIDFromBytes(input []byte) (UUID, error) { - var u UUID - if len(input) != 16 { - return u, errors.New("UUIDs must be exactly 16 bytes long") - } - - copy(u[:], input) - return u, nil -} - -// RandomUUID generates a totally random UUID (version 4) as described in -// RFC 4122. -func RandomUUID() (UUID, error) { - var u UUID - _, err := io.ReadFull(rand.Reader, u[:]) - if err != nil { - return u, err - } - u[6] &= 0x0F // clear version - u[6] |= 0x40 // set version to 4 (random uuid) - u[8] &= 0x3F // clear variant - u[8] |= 0x80 // set to IETF variant - return u, nil -} - -var timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix() - -// TimeUUID generates a new time based UUID (version 1) using the current -// time as the timestamp. -func TimeUUID() UUID { - return UUIDFromTime(time.Now()) -} - -// UUIDFromTime generates a new time based UUID (version 1) as described in -// RFC 4122. This UUID contains the MAC address of the node that generated -// the UUID, the given timestamp and a sequence number. -func UUIDFromTime(aTime time.Time) UUID { - utcTime := aTime.In(time.UTC) - t := int64(utcTime.Unix()-timeBase)*10000000 + int64(utcTime.Nanosecond()/100) - clock := atomic.AddUint32(&clockSeq, 1) - - return TimeUUIDWith(t, clock, hardwareAddr) -} - -// TimeUUIDWith generates a new time based UUID (version 1) as described in -// RFC 4122 with given parameters. t is the number of 100s of nanoseconds -// since 15 Oct 1582 (60 bits). clock is the clock sequence number (14 bits). -// node is a slice to guarantee the uniqueness of the UUID (up to 6 bytes). -// Note: calling this function does not increment the static clock sequence. -func TimeUUIDWith(t int64, clock uint32, node []byte) UUID { - var u UUID - - u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t) - u[4], u[5] = byte(t>>40), byte(t>>32) - u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48) - - u[8] = byte(clock >> 8) - u[9] = byte(clock) - - copy(u[10:], node) - - u[6] |= 0x10 // set version to 1 (time based uuid) - u[8] &= 0x3F // clear variant - u[8] |= 0x80 // set to IETF variant - - return u -} - -// String returns the UUID in its canonical form, a 32 digit hexadecimal -// number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
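A minimal sketch of the UUID helpers above, assuming the package is importable as github.com/gocql/gocql: a version-1 UUID is generated, rendered in its canonical form, and round-tripped through ParseUUID.

package main

import (
	"fmt"

	"github.com/gocql/gocql"
)

func main() {
	u := gocql.TimeUUID() // version 1: node ID + timestamp + clock sequence

	fmt.Println(u.String())  // canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form
	fmt.Println(u.Version()) // 1
	fmt.Println(u.Time())    // timestamp recovered from the embedded 60-bit field

	parsed, err := gocql.ParseUUID(u.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == u) // true: UUID is a [16]byte, directly comparable
}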
-func (u UUID) String() string { - var offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} - const hexString = "0123456789abcdef" - r := make([]byte, 36) - for i, b := range u { - r[offsets[i]] = hexString[b>>4] - r[offsets[i]+1] = hexString[b&0xF] - } - r[8] = '-' - r[13] = '-' - r[18] = '-' - r[23] = '-' - return string(r) - -} - -// Bytes returns the raw byte slice for this UUID. A UUID is always 128 bits -// (16 bytes) long. -func (u UUID) Bytes() []byte { - return u[:] -} - -// Variant returns the variant of this UUID. This package will only generate -// UUIDs in the IETF variant. -func (u UUID) Variant() int { - x := u[8] - if x&0x80 == 0 { - return VariantNCSCompat - } - if x&0x40 == 0 { - return VariantIETF - } - if x&0x20 == 0 { - return VariantMicrosoft - } - return VariantFuture -} - -// Version extracts the version of this UUID variant. The RFC 4122 describes -// five kinds of UUIDs. -func (u UUID) Version() int { - return int(u[6] & 0xF0 >> 4) -} - -// Node extracts the MAC address of the node who generated this UUID. It will -// return nil if the UUID is not a time based UUID (version 1). -func (u UUID) Node() []byte { - if u.Version() != 1 { - return nil - } - return u[10:] -} - -// Clock extracts the clock sequence of this UUID. It will return zero if the -// UUID is not a time based UUID (version 1). -func (u UUID) Clock() uint32 { - if u.Version() != 1 { - return 0 - } - - // Clock sequence is the lower 14bits of u[8:10] - return uint32(u[8]&0x3F)<<8 | uint32(u[9]) -} - -// Timestamp extracts the timestamp information from a time based UUID -// (version 1). -func (u UUID) Timestamp() int64 { - if u.Version() != 1 { - return 0 - } - return int64(uint64(u[0])<<24|uint64(u[1])<<16| - uint64(u[2])<<8|uint64(u[3])) + - int64(uint64(u[4])<<40|uint64(u[5])<<32) + - int64(uint64(u[6]&0x0F)<<56|uint64(u[7])<<48) -} - -// Time is like Timestamp, except that it returns a time.Time. -func (u UUID) Time() time.Time { - if u.Version() != 1 { - return time.Time{} - } - t := u.Timestamp() - sec := t / 1e7 - nsec := (t % 1e7) * 100 - return time.Unix(sec+timeBase, nsec).UTC() -} - -// Marshaling for JSON -func (u UUID) MarshalJSON() ([]byte, error) { - return []byte(`"` + u.String() + `"`), nil -} - -// Unmarshaling for JSON -func (u *UUID) UnmarshalJSON(data []byte) error { - str := strings.Trim(string(data), `"`) - if len(str) > 36 { - return fmt.Errorf("invalid JSON UUID %s", str) - } - - parsed, err := ParseUUID(str) - if err == nil { - copy(u[:], parsed[:]) - } - - return err -} - -func (u UUID) MarshalText() ([]byte, error) { - return []byte(u.String()), nil -} - -func (u *UUID) UnmarshalText(text []byte) (err error) { - *u, err = ParseUUID(string(text)) - return -} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9b3..0000000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. 
-testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa19520a..0000000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae31606..0000000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10f4c..0000000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879a0..0000000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 72efb0353d..0000000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. 
It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b849..0000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65e3..0000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. 
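The Read loop above implements the framing format; a minimal round-trip sketch of how it pairs with the buffered writer, assuming the package is importable as github.com/golang/snappy:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// NewBufferedWriter emits the framed format: a stream identifier
	// chunk, then compressed or uncompressed data chunks, each carrying
	// a checksum of its uncompressed payload.
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write(bytes.Repeat([]byte("hello snappy "), 1000)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes any buffered input
		panic(err)
	}

	// The Reader validates chunk types and checksums as in the loop above.
	r := snappy.NewReader(&buf)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out)) // 13000
}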
- - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. 
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 8c9f2049bc..0000000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike - // the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 8d393e904b..0000000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. 
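The byte-by-byte forward copy above is deliberate: when the source and destination ranges overlap, each iteration may read a byte written by an earlier one, which the built-in copy does not guarantee. A minimal standalone sketch of that behavior:

package main

import "fmt"

func main() {
	// A copy tag with offset 1 and length 7 repeats the byte at d-1,
	// expanding "ab" into a run of 'b's because the two ranges overlap.
	dst := []byte("abxxxxxxx")
	d, offset, length := 2, 1, 7

	for end := d + length; d != end; d++ {
		dst[d] = dst[d-offset]
	}
	fmt.Println(string(dst)) // "abbbbbbbb"
}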
-// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. 
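To make the 32 + n + n/6 bound above concrete, a minimal sketch using the block-format entry points defined in this file (Encode, Decode, MaxEncodedLen), again assuming the github.com/golang/snappy import path:

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := make([]byte, 65536) // 64 KiB of zeros: highly compressible

	// Worst-case bound: 32 + 65536 + 65536/6 = 76490 bytes.
	fmt.Println(snappy.MaxEncodedLen(len(src)))

	enc := snappy.Encode(nil, src) // nil dst: Encode allocates as needed
	fmt.Println(len(enc))          // far below the bound for repetitive input

	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(dec) == len(src)) // true
}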
This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go deleted file mode 100644 index 150d91bc8b..0000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. 
-// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe2..0000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. 
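The two encodings this routine switches between pack the operands as follows; a standalone sketch with concrete byte values (the helper names are illustrative, the bit layouts are those of emitCopy in encode_other.go):

```go
package main

import "fmt"

const (
	tagCopy1 = 0x01
	tagCopy2 = 0x02
)

// copy2 is the 3-byte form: six length bits in the tag byte, then a
// 16-bit little-endian offset. Valid for 1 <= length <= 64.
func copy2(dst []byte, offset, length int) int {
	dst[0] = uint8(length-1)<<2 | tagCopy2
	dst[1] = uint8(offset)
	dst[2] = uint8(offset >> 8)
	return 3
}

// copy1 is the 2-byte form for length < 12 and offset < 2048: three
// length bits and the high three offset bits share the tag byte, and
// the low eight offset bits follow.
func copy1(dst []byte, offset, length int) int {
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[1] = uint8(offset)
	return 2
}

func main() {
	buf := make([]byte, 3)
	fmt.Printf("% x\n", buf[:copy1(buf, 1000, 7)])  // 6d e8: 3<<5 | 3<<2 | 1, then 0xe8
	fmt.Printf("% x\n", buf[:copy2(buf, 1000, 60)]) // ee e8 03
}
```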
-TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . 
len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. 
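The XOR-then-BSF idiom used in both extendMatch paths has a direct pure-Go counterpart in math/bits; a small sketch of the same byte-index computation (firstDiffByte is an illustrative name, not part of the package):

```go
package main

import (
	"fmt"
	"math/bits"
)

// firstDiffByte mirrors the asm above: XOR leaves 1-bits only where
// the two 8-byte words differ, TrailingZeros64 plays the role of BSF
// (finding the least significant set bit; little-endian loads put low
// byte indexes in low bits), and >>3 turns a bit index into a byte
// index. Assumes a != b.
func firstDiffByte(a, b uint64) int {
	return bits.TrailingZeros64(a^b) >> 3
}

func main() {
	fmt.Println(firstDiffByte(0x1122334455667788, 0x1122334455667789)) // 0
	fmt.Println(firstDiffByte(0x1122334455667788, 0x11223344ff667788)) // 4
}
```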
- // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
-	MOVQ DI, 80(SP)
-	CALL ·emitLiteral(SB)
-	MOVQ 80(SP), DI
-
-	// Finish the "d +=" part of "d += emitLiteral(etc)".
-	ADDQ 48(SP), DI
-
-encodeBlockEnd:
-	MOVQ dst_base+0(FP), AX
-	SUBQ AX, DI
-	MOVQ DI, d+48(FP)
-	RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
deleted file mode 100644
index dbcae905e6..0000000000
--- a/vendor/github.com/golang/snappy/encode_other.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !amd64 appengine !gc noasm
-
-package snappy
-
-func load32(b []byte, i int) uint32 {
-	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load64(b []byte, i int) uint64 {
-	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-// emitLiteral writes a literal chunk and returns the number of bytes written.
-//
-// It assumes that:
-//	dst is long enough to hold the encoded bytes
-//	1 <= len(lit) && len(lit) <= 65536
-func emitLiteral(dst, lit []byte) int {
-	i, n := 0, uint(len(lit)-1)
-	switch {
-	case n < 60:
-		dst[0] = uint8(n)<<2 | tagLiteral
-		i = 1
-	case n < 1<<8:
-		dst[0] = 60<<2 | tagLiteral
-		dst[1] = uint8(n)
-		i = 2
-	default:
-		dst[0] = 61<<2 | tagLiteral
-		dst[1] = uint8(n)
-		dst[2] = uint8(n >> 8)
-		i = 3
-	}
-	return i + copy(dst[i:], lit)
-}
-
-// emitCopy writes a copy chunk and returns the number of bytes written.
-//
-// It assumes that:
-//	dst is long enough to hold the encoded bytes
-//	1 <= offset && offset <= 65535
-//	4 <= length && length <= 65535
-func emitCopy(dst []byte, offset, length int) int {
-	i := 0
-	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
-	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
-	// length emitted down below is a little lower (at 60 = 64 - 4), because
-	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
-	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
-	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
-	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
-	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
-	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
-	for length >= 68 {
-		// Emit a length 64 copy, encoded as 3 bytes.
-		dst[i+0] = 63<<2 | tagCopy2
-		dst[i+1] = uint8(offset)
-		dst[i+2] = uint8(offset >> 8)
-		i += 3
-		length -= 64
-	}
-	if length > 64 {
-		// Emit a length 60 copy, encoded as 3 bytes.
-		dst[i+0] = 59<<2 | tagCopy2
-		dst[i+1] = uint8(offset)
-		dst[i+2] = uint8(offset >> 8)
-		i += 3
-		length -= 60
-	}
-	if length >= 12 || offset >= 2048 {
-		// Emit the remaining copy, encoded as 3 bytes.
-		dst[i+0] = uint8(length-1)<<2 | tagCopy2
-		dst[i+1] = uint8(offset)
-		dst[i+2] = uint8(offset >> 8)
-		return i + 3
-	}
-	// Emit the remaining copy, encoded as 2 bytes.
- dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index 0cf5e379c4..0000000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. -// -// The C++ snappy implementation is at https://github.com/google/snappy -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. 
- - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/hailocab/go-hostpool/.gitignore b/vendor/github.com/hailocab/go-hostpool/.gitignore deleted file mode 100644 index 00268614f0..0000000000 --- a/vendor/github.com/hailocab/go-hostpool/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/hailocab/go-hostpool/.travis.yml b/vendor/github.com/hailocab/go-hostpool/.travis.yml deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/hailocab/go-hostpool/LICENSE b/vendor/github.com/hailocab/go-hostpool/LICENSE deleted file mode 100644 index f24db89c4e..0000000000 --- a/vendor/github.com/hailocab/go-hostpool/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Bitly - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/hailocab/go-hostpool/README.md b/vendor/github.com/hailocab/go-hostpool/README.md deleted file mode 100644 index 7f4437277d..0000000000 --- a/vendor/github.com/hailocab/go-hostpool/README.md +++ /dev/null @@ -1,17 +0,0 @@ -go-hostpool -=========== - -A Go package to intelligently and flexibly pool among multiple hosts from your Go application. -Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are -avoided. -Usage example: - -```go -hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{}) -hostResponse := hp.Get() -hostname := hostResponse.Host() -err := _ // (make a request with hostname) -hostResponse.Mark(err) -``` - -View more detailed documentation on [godoc.org](http://godoc.org/github.com/bitly/go-hostpool) diff --git a/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go b/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go deleted file mode 100644 index 8627aa5cd2..0000000000 --- a/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go +++ /dev/null @@ -1,220 +0,0 @@ -package hostpool - -import ( - "log" - "math/rand" - "time" -) - -type epsilonHostPoolResponse struct { - standardHostPoolResponse - started time.Time - ended time.Time -} - -func (r *epsilonHostPoolResponse) Mark(err error) { - r.Do(func() { - r.ended = time.Now() - doMark(err, r) - }) -} - -type epsilonGreedyHostPool struct { - standardHostPool // TODO - would be nifty if we could embed HostPool and Locker interfaces - epsilon float32 // this is our exploration factor - decayDuration time.Duration - EpsilonValueCalculator // embed the epsilonValueCalculator - timer - quit chan bool -} - -// Construct an Epsilon Greedy HostPool -// -// Epsilon Greedy is an algorithm that allows HostPool not only to track failure state, -// but also to learn about "better" options in terms of speed, and to pick from available hosts -// based on how well they perform. This gives a weighted request rate to better -// performing hosts, while still distributing requests to all hosts (proportionate to their performance). -// The interface is the same as the standard HostPool, but be sure to mark the HostResponse immediately -// after executing the request to the host, as that will stop the implicitly running request timer. -// -// A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 -// -// To compute the weighting scores, we perform a weighted average of recent response times, over the course of -// `decayDuration`. decayDuration may be set to 0 to use the default value of 5 minutes -// We then use the supplied EpsilonValueCalculator to calculate a score from that weighted average response time. 
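Putting the pieces together, typical use of the epsilon-greedy pool looks like the sketch below, a runnable variant of the README example above (the host names and the request URL are placeholders):

```go
package main

import (
	"net/http"

	"github.com/hailocab/go-hostpool"
)

func main() {
	// decayDuration 0 selects the default 5-minute window described above.
	hp := hostpool.NewEpsilonGreedy(
		[]string{"a.example.com", "b.example.com"}, // placeholder hosts
		0,
		&hostpool.LinearEpsilonValueCalculator{},
	)
	defer hp.Close()

	hr := hp.Get()
	resp, err := http.Get("http://" + hr.Host() + "/") // placeholder request
	if err == nil {
		resp.Body.Close()
	}
	// Mark promptly: it stops the per-request timer that feeds the
	// weighted-average response-time score.
	hr.Mark(err)
}
```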
-func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonValueCalculator) HostPool {
-
-	if decayDuration <= 0 {
-		decayDuration = defaultDecayDuration
-	}
-	stdHP := New(hosts).(*standardHostPool)
-	p := &epsilonGreedyHostPool{
-		standardHostPool:       *stdHP,
-		epsilon:                float32(initialEpsilon),
-		decayDuration:          decayDuration,
-		EpsilonValueCalculator: calc,
-		timer:                  &realTimer{},
-		quit:                   make(chan bool),
-	}
-
-	// allocate structures
-	for _, h := range p.hostList {
-		h.epsilonCounts = make([]int64, epsilonBuckets)
-		h.epsilonValues = make([]int64, epsilonBuckets)
-	}
-	go p.epsilonGreedyDecay()
-	return p
-}
-
-func (p *epsilonGreedyHostPool) Close() {
-	// No need to do p.quit <- true as close(p.quit) does the trick.
-	close(p.quit)
-}
-
-func (p *epsilonGreedyHostPool) SetEpsilon(newEpsilon float32) {
-	p.Lock()
-	defer p.Unlock()
-	p.epsilon = newEpsilon
-}
-
-func (p *epsilonGreedyHostPool) SetHosts(hosts []string) {
-	p.Lock()
-	defer p.Unlock()
-	p.standardHostPool.setHosts(hosts)
-	for _, h := range p.hostList {
-		h.epsilonCounts = make([]int64, epsilonBuckets)
-		h.epsilonValues = make([]int64, epsilonBuckets)
-	}
-}
-
-func (p *epsilonGreedyHostPool) epsilonGreedyDecay() {
-	durationPerBucket := p.decayDuration / epsilonBuckets
-	ticker := time.NewTicker(durationPerBucket)
-	for {
-		select {
-		case <-p.quit:
-			ticker.Stop()
-			return
-		case <-ticker.C:
-			p.performEpsilonGreedyDecay()
-		}
-	}
-}
-func (p *epsilonGreedyHostPool) performEpsilonGreedyDecay() {
-	p.Lock()
-	for _, h := range p.hostList {
-		h.epsilonIndex += 1
-		h.epsilonIndex = h.epsilonIndex % epsilonBuckets
-		h.epsilonCounts[h.epsilonIndex] = 0
-		h.epsilonValues[h.epsilonIndex] = 0
-	}
-	p.Unlock()
-}
-
-func (p *epsilonGreedyHostPool) Get() HostPoolResponse {
-	p.Lock()
-	defer p.Unlock()
-	host := p.getEpsilonGreedy()
-	if host == "" {
-		return nil
-	}
-
-	started := time.Now()
-	return &epsilonHostPoolResponse{
-		standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p},
-		started:                  started,
-	}
-}
-
-func (p *epsilonGreedyHostPool) getEpsilonGreedy() string {
-	var hostToUse *hostEntry
-
-	// this is our exploration phase
-	if rand.Float32() < p.epsilon {
-		p.epsilon = p.epsilon * epsilonDecay
-		if p.epsilon < minEpsilon {
-			p.epsilon = minEpsilon
-		}
-		return p.getRoundRobin()
-	}
-
-	// calculate values for each host in the 0..1 range (but not normalized)
-	var possibleHosts []*hostEntry
-	now := time.Now()
-	var sumValues float64
-	for _, h := range p.hostList {
-		if h.canTryHost(now) {
-			v := h.getWeightedAverageResponseTime()
-			if v > 0 {
-				ev := p.CalcValueFromAvgResponseTime(v)
-				h.epsilonValue = ev
-				sumValues += ev
-				possibleHosts = append(possibleHosts, h)
-			}
-		}
-	}
-
-	if len(possibleHosts) != 0 {
-		// now normalize to the 0..1 range to get a percentage
-		for _, h := range possibleHosts {
-			h.epsilonPercentage = h.epsilonValue / sumValues
-		}
-
-		// do a weighted random choice among hosts
-		ceiling := 0.0
-		pickPercentage := rand.Float64()
-		for _, h := range possibleHosts {
-			ceiling += h.epsilonPercentage
-			if pickPercentage <= ceiling {
-				hostToUse = h
-				break
-			}
-		}
-	}
-
-	if hostToUse == nil {
-		if len(possibleHosts) != 0 {
-			log.Println("Failed to randomly choose a host, Dan loses")
-		}
-
-		return p.getRoundRobin()
-	}
-
-	if hostToUse.dead {
-		hostToUse.willRetryHost(p.maxRetryInterval)
-	}
-	return hostToUse.host
-}
-
-func (p *epsilonGreedyHostPool) markSuccess(hostR HostPoolResponse) {
-	// first do the base markSuccess - a little redundant with host lookup
but cleaner than repeating logic - p.standardHostPool.markSuccess(hostR) - eHostR, ok := hostR.(*epsilonHostPoolResponse) - if !ok { - log.Printf("Incorrect type in eps markSuccess!") // TODO reflection to print out offending type - return - } - host := eHostR.host - duration := p.between(eHostR.started, eHostR.ended) - - p.Lock() - defer p.Unlock() - h, ok := p.hosts[host] - if !ok { - log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) - } - h.epsilonCounts[h.epsilonIndex]++ - h.epsilonValues[h.epsilonIndex] += int64(duration.Seconds() * 1000) -} - -// --- timer: this just exists for testing - -type timer interface { - between(time.Time, time.Time) time.Duration -} - -type realTimer struct{} - -func (rt *realTimer) between(start time.Time, end time.Time) time.Duration { - return end.Sub(start) -} diff --git a/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go b/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go deleted file mode 100644 index 9bc3102a92..0000000000 --- a/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go +++ /dev/null @@ -1,40 +0,0 @@ -package hostpool - -// --- Value Calculators ----------------- - -import ( - "math" -) - -// --- Definitions ----------------------- - -// Structs implementing this interface are used to convert the average response time for a host -// into a score that can be used to weight hosts in the epsilon greedy hostpool. Lower response -// times should yield higher scores (we want to select the faster hosts more often) The default -// LinearEpsilonValueCalculator just uses the reciprocal of the response time. In practice, any -// decreasing function from the positive reals to the positive reals should work. -type EpsilonValueCalculator interface { - CalcValueFromAvgResponseTime(float64) float64 -} - -type LinearEpsilonValueCalculator struct{} -type LogEpsilonValueCalculator struct{ LinearEpsilonValueCalculator } -type PolynomialEpsilonValueCalculator struct { - LinearEpsilonValueCalculator - Exp float64 // the exponent to which we will raise the value to reweight -} - -// -------- Methods ----------------------- - -func (c *LinearEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { - return 1.0 / v -} - -func (c *LogEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { - // we need to add 1 to v so that this will be defined on all positive floats - return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Log(v + 1.0)) -} - -func (c *PolynomialEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { - return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Pow(v, c.Exp)) -} diff --git a/vendor/github.com/hailocab/go-hostpool/host_entry.go b/vendor/github.com/hailocab/go-hostpool/host_entry.go deleted file mode 100644 index dcec9a0b70..0000000000 --- a/vendor/github.com/hailocab/go-hostpool/host_entry.go +++ /dev/null @@ -1,62 +0,0 @@ -package hostpool - -import ( - "time" -) - -// --- hostEntry - this is due to get upgraded - -type hostEntry struct { - host string - nextRetry time.Time - retryCount int16 - retryDelay time.Duration - dead bool - epsilonCounts []int64 - epsilonValues []int64 - epsilonIndex int - epsilonValue float64 - epsilonPercentage float64 -} - -func (h *hostEntry) canTryHost(now time.Time) bool { - if !h.dead { - return true - } - if h.nextRetry.Before(now) { - return true - } - return false -} - -func (h *hostEntry) willRetryHost(maxRetryInterval time.Duration) { - h.retryCount += 1 - 
newDelay := h.retryDelay * 2 - if newDelay < maxRetryInterval { - h.retryDelay = newDelay - } else { - h.retryDelay = maxRetryInterval - } - h.nextRetry = time.Now().Add(h.retryDelay) -} - -func (h *hostEntry) getWeightedAverageResponseTime() float64 { - var value float64 - var lastValue float64 - - // start at 1 so we start with the oldest entry - for i := 1; i <= epsilonBuckets; i += 1 { - pos := (h.epsilonIndex + i) % epsilonBuckets - bucketCount := h.epsilonCounts[pos] - // Changing the line below to what I think it should be to get the weights right - weight := float64(i) / float64(epsilonBuckets) - if bucketCount > 0 { - currentValue := float64(h.epsilonValues[pos]) / float64(bucketCount) - value += currentValue * weight - lastValue = currentValue - } else { - value += lastValue * weight - } - } - return value -} diff --git a/vendor/github.com/hailocab/go-hostpool/hostpool.go b/vendor/github.com/hailocab/go-hostpool/hostpool.go deleted file mode 100644 index 702ca9276a..0000000000 --- a/vendor/github.com/hailocab/go-hostpool/hostpool.go +++ /dev/null @@ -1,243 +0,0 @@ -// A Go package to intelligently and flexibly pool among multiple hosts from your Go application. -// Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are -// avoided. A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 -package hostpool - -import ( - "log" - "sync" - "time" -) - -// Returns current version -func Version() string { - return "0.1" -} - -// --- Response interfaces and structs ---- - -// This interface represents the response from HostPool. You can retrieve the -// hostname by calling Host(), and after making a request to the host you should -// call Mark with any error encountered, which will inform the HostPool issuing -// the HostPoolResponse of what happened to the request and allow it to update. -type HostPoolResponse interface { - Host() string - Mark(error) - hostPool() HostPool -} - -type standardHostPoolResponse struct { - host string - sync.Once - pool HostPool -} - -// --- HostPool structs and interfaces ---- - -// This is the main HostPool interface. Structs implementing this interface -// allow you to Get a HostPoolResponse (which includes a hostname to use), -// get the list of all Hosts, and use ResetAll to reset state. -type HostPool interface { - Get() HostPoolResponse - // keep the marks separate so we can override independently - markSuccess(HostPoolResponse) - markFailed(HostPoolResponse) - - ResetAll() - // ReturnUnhealthy when called with true will prevent an unhealthy node from - // being returned and will instead return a nil HostPoolResponse. If using - // this feature then you should check the result of Get for nil - ReturnUnhealthy(v bool) - Hosts() []string - SetHosts([]string) - - // Close the hostpool and release all resources. 
-	Close()
-}
-
-type standardHostPool struct {
-	sync.RWMutex
-	hosts             map[string]*hostEntry
-	hostList          []*hostEntry
-	returnUnhealthy   bool
-	initialRetryDelay time.Duration
-	maxRetryInterval  time.Duration
-	nextHostIndex     int
-}
-
-// ------ constants -------------------
-
-const epsilonBuckets = 120
-const epsilonDecay = 0.90 // decay the exploration rate
-const minEpsilon = 0.01   // explore one percent of the time
-const initialEpsilon = 0.3
-const defaultDecayDuration = time.Duration(5) * time.Minute
-
-// Construct a basic HostPool using the hostnames provided
-func New(hosts []string) HostPool {
-	p := &standardHostPool{
-		returnUnhealthy:   true,
-		hosts:             make(map[string]*hostEntry, len(hosts)),
-		hostList:          make([]*hostEntry, len(hosts)),
-		initialRetryDelay: time.Duration(30) * time.Second,
-		maxRetryInterval:  time.Duration(900) * time.Second,
-	}
-
-	for i, h := range hosts {
-		e := &hostEntry{
-			host:       h,
-			retryDelay: p.initialRetryDelay,
-		}
-		p.hosts[h] = e
-		p.hostList[i] = e
-	}
-
-	return p
-}
-
-func (r *standardHostPoolResponse) Host() string {
-	return r.host
-}
-
-func (r *standardHostPoolResponse) hostPool() HostPool {
-	return r.pool
-}
-
-func (r *standardHostPoolResponse) Mark(err error) {
-	r.Do(func() {
-		doMark(err, r)
-	})
-}
-
-func doMark(err error, r HostPoolResponse) {
-	if err == nil {
-		r.hostPool().markSuccess(r)
-	} else {
-		r.hostPool().markFailed(r)
-	}
-}
-
-// return an entry from the HostPool
-func (p *standardHostPool) Get() HostPoolResponse {
-	p.Lock()
-	defer p.Unlock()
-	host := p.getRoundRobin()
-	if host == "" {
-		return nil
-	}
-
-	return &standardHostPoolResponse{host: host, pool: p}
-}
-
-func (p *standardHostPool) getRoundRobin() string {
-	now := time.Now()
-	hostCount := len(p.hostList)
-	for i := range p.hostList {
-		// iterate via sequence from where we last iterated
-		currentIndex := (i + p.nextHostIndex) % hostCount
-
-		h := p.hostList[currentIndex]
-		if !h.dead {
-			p.nextHostIndex = currentIndex + 1
-			return h.host
-		}
-		if h.nextRetry.Before(now) {
-			h.willRetryHost(p.maxRetryInterval)
-			p.nextHostIndex = currentIndex + 1
-			return h.host
-		}
-	}
-
-	// if all hosts are down and returnUnhealthy is false then return no host
-	if !p.returnUnhealthy {
-		return ""
-	}
-
-	// all hosts are down.
re-add them - p.doResetAll() - p.nextHostIndex = 0 - return p.hostList[0].host -} - -func (p *standardHostPool) ResetAll() { - p.Lock() - defer p.Unlock() - p.doResetAll() -} - -func (p *standardHostPool) SetHosts(hosts []string) { - p.Lock() - defer p.Unlock() - p.setHosts(hosts) -} - -func (p *standardHostPool) ReturnUnhealthy(v bool) { - p.Lock() - defer p.Unlock() - p.returnUnhealthy = v -} - -func (p *standardHostPool) setHosts(hosts []string) { - p.hosts = make(map[string]*hostEntry, len(hosts)) - p.hostList = make([]*hostEntry, len(hosts)) - - for i, h := range hosts { - e := &hostEntry{ - host: h, - retryDelay: p.initialRetryDelay, - } - p.hosts[h] = e - p.hostList[i] = e - } -} - -// this actually performs the logic to reset, -// and should only be called when the lock has -// already been acquired -func (p *standardHostPool) doResetAll() { - for _, h := range p.hosts { - h.dead = false - } -} - -func (p *standardHostPool) Close() { - for _, h := range p.hosts { - h.dead = true - } -} - -func (p *standardHostPool) markSuccess(hostR HostPoolResponse) { - host := hostR.Host() - p.Lock() - defer p.Unlock() - - h, ok := p.hosts[host] - if !ok { - log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) - } - h.dead = false -} - -func (p *standardHostPool) markFailed(hostR HostPoolResponse) { - host := hostR.Host() - p.Lock() - defer p.Unlock() - h, ok := p.hosts[host] - if !ok { - log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) - } - if !h.dead { - h.dead = true - h.retryCount = 0 - h.retryDelay = p.initialRetryDelay - h.nextRetry = time.Now().Add(h.retryDelay) - } - -} -func (p *standardHostPool) Hosts() []string { - hosts := make([]string, 0, len(p.hosts)) - for host := range p.hosts { - hosts = append(hosts, host) - } - return hosts -} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE deleted file mode 100644 index c33dcc7c92..0000000000 --- a/vendor/github.com/hashicorp/consul/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. 
“Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. 
Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. 
You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md deleted file mode 100644 index 7e64988f42..0000000000 --- a/vendor/github.com/hashicorp/consul/api/README.md +++ /dev/null @@ -1,43 +0,0 @@ -Consul API client -================= - -This package provides the `api` package which attempts to -provide programmatic access to the full Consul API. - -Currently, all of the Consul APIs included in version 0.6.0 are supported. - -Documentation -============= - -The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) - -Usage -===== - -Below is an example of using the Consul client: - -```go -// Get a new client -client, err := api.NewClient(api.DefaultConfig()) -if err != nil { - panic(err) -} - -// Get a handle to the KV API -kv := client.KV() - -// PUT a new KV pair -p := &api.KVPair{Key: "foo", Value: []byte("test")} -_, err = kv.Put(p, nil) -if err != nil { - panic(err) -} - -// Lookup the pair -pair, _, err := kv.Get("foo", nil) -if err != nil { - panic(err) -} -fmt.Printf("KV: %v", pair) - -``` diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go deleted file mode 100644 index 8ec9aa5855..0000000000 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ /dev/null @@ -1,193 +0,0 @@ -package api - -import ( - "time" -) - -const ( - // ACLClientType is the client type token - ACLClientType = "client" - - // ACLManagementType is the management type token - ACLManagementType = "management" -) - -// ACLEntry is used to represent an ACL entry -type ACLEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - ID string - Name string - Type string - Rules string -} - -// ACLReplicationStatus is used to represent the status of ACL replication. -type ACLReplicationStatus struct { - Enabled bool - Running bool - SourceDatacenter string - ReplicatedIndex uint64 - LastSuccess time.Time - LastError time.Time -} - -// ACL can be used to query the ACL endpoints -type ACL struct { - c *Client -} - -// ACL returns a handle to the ACL endpoints -func (c *Client) ACL() *ACL { - return &ACL{c} -} - -// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster -// to get the first management token. 
-func (a *ACL) Bootstrap() (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/bootstrap") - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Create is used to generate a new token with the given parameters -func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/create") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update is used to update the rules of an existing token -func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/update") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Destroy is used to destroy a given ACL token ID -func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Clone is used to return a new token cloned from an existing one -func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Info is used to query for information about an ACL token -func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/info/"+id) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to get all the ACL tokens -func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/list") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Replication returns the status of the ACL replication process in the datacenter -func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/replication") - 
r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries *ACLReplicationStatus - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go deleted file mode 100644 index b42baed41d..0000000000 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ /dev/null @@ -1,627 +0,0 @@ -package api - -import ( - "bufio" - "fmt" -) - -// AgentCheck represents a check known to the agent -type AgentCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string - Definition HealthCheckDefinition -} - -// AgentService represents a service known to the agent -type AgentService struct { - ID string - Service string - Tags []string - Port int - Address string - EnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 -} - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - Status int - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// AllSegments is used to select for all segments in MembersOpts. -const AllSegments = "_all" - -// MembersOpts is used for querying member information. -type MembersOpts struct { - // WAN is whether to show members from the WAN. - WAN bool - - // Segment is the LAN segment to show members for. Setting this to the - // AllSegments value above will show members in all segments. - Segment string -} - -// AgentServiceRegistration is used to register a new service -type AgentServiceRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string `json:",omitempty"` - EnableTagOverride bool `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks -} - -// AgentCheckRegistration is used to register a new check -type AgentCheckRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Notes string `json:",omitempty"` - ServiceID string `json:",omitempty"` - AgentServiceCheck -} - -// AgentServiceCheck is used to define a node or service level check -type AgentServiceCheck struct { - CheckID string `json:",omitempty"` - Name string `json:",omitempty"` - Args []string `json:"ScriptArgs,omitempty"` - Script string `json:",omitempty"` // Deprecated, use Args. - DockerContainerID string `json:",omitempty"` - Shell string `json:",omitempty"` // Only supported for Docker. 
- Interval string `json:",omitempty"` - Timeout string `json:",omitempty"` - TTL string `json:",omitempty"` - HTTP string `json:",omitempty"` - Header map[string][]string `json:",omitempty"` - Method string `json:",omitempty"` - TCP string `json:",omitempty"` - Status string `json:",omitempty"` - Notes string `json:",omitempty"` - TLSSkipVerify bool `json:",omitempty"` - GRPC string `json:",omitempty"` - GRPCUseTLS bool `json:",omitempty"` - - // In Consul 0.7 and later, checks that are associated with a service - // may also contain this optional DeregisterCriticalServiceAfter field, - // which is a timeout in the same Go time format as Interval and TTL. If - // a check is in the critical state for more than this configured value, - // then its associated service (and all of its associated checks) will - // automatically be deregistered. - DeregisterCriticalServiceAfter string `json:",omitempty"` -} -type AgentServiceChecks []*AgentServiceCheck - -// AgentToken is used when updating ACL tokens for an agent. -type AgentToken struct { - Token string -} - -// Metrics info is used to store different types of metric values from the agent. -type MetricsInfo struct { - Timestamp string - Gauges []GaugeValue - Points []PointValue - Counters []SampledValue - Samples []SampledValue -} - -// GaugeValue stores one value that is updated as time goes on, such as -// the amount of memory allocated. -type GaugeValue struct { - Name string - Value float32 - Labels map[string]string -} - -// PointValue holds a series of points for a metric. -type PointValue struct { - Name string - Points []float32 -} - -// SampledValue stores info about a metric that is incremented over time, -// such as the number of requests to an HTTP endpoint. -type SampledValue struct { - Name string - Count int - Sum float64 - Min float64 - Max float64 - Mean float64 - Stddev float64 - Labels map[string]string -} - -// Agent can be used to query the Agent endpoints -type Agent struct { - c *Client - - // cache the node name - nodeName string -} - -// Agent returns a handle to the agent endpoints -func (c *Client) Agent() *Agent { - return &Agent{c: c} -} - -// Self is used to query the agent we are speaking to for -// information about itself -func (a *Agent) Self() (map[string]map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/self") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Metrics is used to query the agent we are speaking to for -// its current internal metric data -func (a *Agent) Metrics() (*MetricsInfo, error) { - r := a.c.newRequest("GET", "/v1/agent/metrics") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out *MetricsInfo - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Reload triggers a configuration reload for the agent we are connected to. 
-func (a *Agent) Reload() error { - r := a.c.newRequest("PUT", "/v1/agent/reload") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// NodeName is used to get the node name of the agent -func (a *Agent) NodeName() (string, error) { - if a.nodeName != "" { - return a.nodeName, nil - } - info, err := a.Self() - if err != nil { - return "", err - } - name := info["Config"]["NodeName"].(string) - a.nodeName = name - return name, nil -} - -// Checks returns the locally registered checks -func (a *Agent) Checks() (map[string]*AgentCheck, error) { - r := a.c.newRequest("GET", "/v1/agent/checks") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentCheck - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Services returns the locally registered services -func (a *Agent) Services() (map[string]*AgentService, error) { - r := a.c.newRequest("GET", "/v1/agent/services") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Members returns the known gossip members. The WAN -// flag can be used to query a server for WAN members. -func (a *Agent) Members(wan bool) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// MembersOpts returns the known gossip members and can be passed -// additional options for WAN/segment filtering. -func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - r.params.Set("segment", opts.Segment) - if opts.WAN { - r.params.Set("wan", "1") - } - - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// ServiceRegister is used to register a new service with -// the local agent -func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/service/register") - r.obj = service - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ServiceDeregister is used to deregister a service with -// the local agent -func (a *Agent) ServiceDeregister(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// PassTTL is used to set a TTL check to the passing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) PassTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "pass") -} - -// WarnTTL is used to set a TTL check to the warning state. 
-// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) WarnTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "warn") -} - -// FailTTL is used to set a TTL check to the failing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) FailTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "fail") -} - -// updateTTL is used to update the TTL of a check. This is the internal -// method that uses the old API that's present in Consul versions prior to -// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed -// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, -// but keep the old Pass/Warn/Fail methods using the old API under the hood. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 and the server endpoints will -// be removed in 0.9. -func (a *Agent) updateTTL(checkID, note, status string) error { - switch status { - case "pass": - case "warn": - case "fail": - default: - return fmt.Errorf("Invalid status: %s", status) - } - endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) - r := a.c.newRequest("PUT", endpoint) - r.params.Set("note", note) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// checkUpdate is the payload for a PUT for a check update. -type checkUpdate struct { - // Status is one of the api.Health* states: HealthPassing - // ("passing"), HealthWarning ("warning"), or HealthCritical - // ("critical"). - Status string - - // Output is the information to post to the UI for operators as the - // output of the process that decided to hit the TTL check. This is - // different from the note field that's associated with the check - // itself. - Output string -} - -// UpdateTTL is used to update the TTL of a check. This uses the newer API -// that was introduced in Consul 0.6.4 and later. We translate the old status -// strings for compatibility (though a newer version of Consul will still be -// required to use this API). 
-func (a *Agent) UpdateTTL(checkID, output, status string) error { - switch status { - case "pass", HealthPassing: - status = HealthPassing - case "warn", HealthWarning: - status = HealthWarning - case "fail", HealthCritical: - status = HealthCritical - default: - return fmt.Errorf("Invalid status: %s", status) - } - - endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) - r := a.c.newRequest("PUT", endpoint) - r.obj = &checkUpdate{ - Status: status, - Output: output, - } - - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckRegister is used to register a new check with -// the local agent -func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/check/register") - r.obj = check - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckDeregister is used to deregister a check with -// the local agent -func (a *Agent) CheckDeregister(checkID string) error { - r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Join is used to instruct the agent to attempt a join to -// another cluster member -func (a *Agent) Join(addr string, wan bool) error { - r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Leave is used to have the agent gracefully leave the cluster and shutdown -func (a *Agent) Leave() error { - r := a.c.newRequest("PUT", "/v1/agent/leave") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ForceLeave is used to have the agent eject a failed node -func (a *Agent) ForceLeave(node string) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableServiceMaintenance toggles service maintenance mode on -// for the given service ID. -func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableServiceMaintenance toggles service maintenance mode off -// for the given service ID. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableNodeMaintenance toggles node maintenance mode on for the -// agent we are connected to. -func (a *Agent) EnableNodeMaintenance(reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableNodeMaintenance toggles node maintenance mode off for the -// agent we are connected to. 
-func (a *Agent) DisableNodeMaintenance() error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Monitor returns a channel which will receive streaming logs from the agent -// Providing a non-nil stopCh can be used to close the connection and stop the -// log stream. An empty string will be sent down the given channel when there's -// nothing left to stream, after which the caller should close the stopCh. -func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { - r := a.c.newRequest("GET", "/v1/agent/monitor") - r.setQueryOptions(q) - if loglevel != "" { - r.params.Add("loglevel", loglevel) - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - - logCh := make(chan string, 64) - go func() { - defer resp.Body.Close() - - scanner := bufio.NewScanner(resp.Body) - for { - select { - case <-stopCh: - close(logCh) - return - default: - } - if scanner.Scan() { - // An empty string signals to the caller that - // the scan is done, so make sure we only emit - // that when the scanner says it's done, not if - // we happen to ingest an empty line. - if text := scanner.Text(); text != "" { - logCh <- text - } else { - logCh <- " " - } - } else { - logCh <- "" - } - } - }() - - return logCh, nil -} - -// UpdateACLToken updates the agent's "acl_token". See updateToken for more -// details. -func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_token", token, q) -} - -// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken -// for more details. -func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_agent_token", token, q) -} - -// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See -// updateToken for more details. -func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_agent_master_token", token, q) -} - -// UpdateACLReplicationToken updates the agent's "acl_replication_token". See -// updateToken for more details. -func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_replication_token", token, q) -} - -// updateToken can be used to update an agent's ACL token after the agent has -// started. The tokens are not persisted, so will need to be updated again if -// the agent is restarted. 
-func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) - r.setWriteOptions(q) - r.obj = &AgentToken{Token: token} - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go deleted file mode 100644 index 1cdc21e331..0000000000 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ /dev/null @@ -1,791 +0,0 @@ -package api - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-rootcerts" -) - -const ( - // HTTPAddrEnvName defines an environment variable name which sets - // the HTTP address if there is no -http-addr specified. - HTTPAddrEnvName = "CONSUL_HTTP_ADDR" - - // HTTPTokenEnvName defines an environment variable name which sets - // the HTTP token. - HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" - - // HTTPAuthEnvName defines an environment variable name which sets - // the HTTP authentication header. - HTTPAuthEnvName = "CONSUL_HTTP_AUTH" - - // HTTPSSLEnvName defines an environment variable name which sets - // whether or not to use HTTPS. - HTTPSSLEnvName = "CONSUL_HTTP_SSL" - - // HTTPCAFile defines an environment variable name which sets the - // CA file to use for talking to Consul over TLS. - HTTPCAFile = "CONSUL_CACERT" - - // HTTPCAPath defines an environment variable name which sets the - // path to a directory of CA certs to use for talking to Consul over TLS. - HTTPCAPath = "CONSUL_CAPATH" - - // HTTPClientCert defines an environment variable name which sets the - // client cert file to use for talking to Consul over TLS. - HTTPClientCert = "CONSUL_CLIENT_CERT" - - // HTTPClientKey defines an environment variable name which sets the - // client key file to use for talking to Consul over TLS. - HTTPClientKey = "CONSUL_CLIENT_KEY" - - // HTTPTLSServerName defines an environment variable name which sets the - // server name to use as the SNI host when connecting via TLS - HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" - - // HTTPSSLVerifyEnvName defines an environment variable name which sets - // whether or not to disable certificate checking. - HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" -) - -// QueryOptions are used to parameterize a query -type QueryOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // AllowStale allows any Consul server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // RequireConsistent forces the read to be fully consistent. - // This is more expensive but prevents ever performing a stale - // read. - RequireConsistent bool - - // WaitIndex is used to enable a blocking query. Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. 
- Token string - - // Near is used to provide a node name that will sort the results - // in ascending order based on the estimated round trip time from - // that node. Setting this to "_agent" will use the agent's node - // for the sort. - Near string - - // NodeMeta is used to filter results by nodes with the given - // metadata key/value pairs. Currently, only one key/value pair can - // be provided for filtering. - NodeMeta map[string]string - - // RelayFactor is used in keyring operations to cause responses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context -} - -func (o *QueryOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { - o2 := new(QueryOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// WriteOptions are used to parameterize a write -type WriteOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // RelayFactor is used in keyring operations to cause responses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context -} - -func (o *WriteOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { - o2 := new(WriteOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// QueryMeta is used to return meta data about a query -type QueryMeta struct { - // LastIndex. This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // Time of last contact from the leader for the - // server servicing the request - LastContact time.Duration - - // Is there a known leader - KnownLeader bool - - // How long did the request take - RequestTime time.Duration - - // Is address translation enabled for HTTP responses on this agent - AddressTranslationEnabled bool -} - -// WriteMeta is used to return meta data about a write -type WriteMeta struct { - // How long did the request take - RequestTime time.Duration -} - -// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication -type HttpBasicAuth struct { - // Username to use for HTTP Basic Authentication - Username string - - // Password to use for HTTP Basic Authentication - Password string -} - -// Config is used to configure the creation of a client -type Config struct { - // Address is the address of the Consul server - Address string - - // Scheme is the URI scheme for the Consul server - Scheme string - - // Datacenter to use. If not provided, the default agent datacenter is used. - Datacenter string - - // Transport is the Transport to use for the http client. - Transport *http.Transport - - // HttpClient is the client to use. Default will be - // used if not provided. 
- HttpClient *http.Client - - // HttpAuth is the auth info to use for http access. - HttpAuth *HttpBasicAuth - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - TLSConfig TLSConfig -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. -type TLSConfig struct { - // Address is the optional address of the Consul server. The port, if any - // will be removed from here and this will be set to the ServerName of the - // resulting config. - Address string - - // CAFile is the optional path to the CA certificate used for Consul - // communication, defaults to the system bundle if not specified. - CAFile string - - // CAPath is the optional path to a directory of CA certificates to use for - // Consul communication, defaults to the system bundle if not specified. - CAPath string - - // CertFile is the optional path to the certificate for Consul - // communication. If this is set then you need to also set KeyFile. - CertFile string - - // KeyFile is the optional path to the private key for Consul communication. - // If this is set then you need to also set CertFile. - KeyFile string - - // InsecureSkipVerify if set to true will disable TLS host verification. - InsecureSkipVerify bool -} - -// DefaultConfig returns a default configuration for the client. By default this -// will pool and reuse idle connections to Consul. If you have a long-lived -// client object, this is the desired behavior and should make the most efficient -// use of the connections to Consul. If you don't reuse a client object , which -// is not recommended, then you may notice idle connections building up over -// time. To avoid this, use the DefaultNonPooledConfig() instead. -func DefaultConfig() *Config { - return defaultConfig(cleanhttp.DefaultPooledTransport) -} - -// DefaultNonPooledConfig returns a default configuration for the client which -// does not pool connections. This isn't a recommended configuration because it -// will reconnect to Consul on every request, but this is useful to avoid the -// accumulation of idle connections if you make many client objects during the -// lifetime of your application. -func DefaultNonPooledConfig() *Config { - return defaultConfig(cleanhttp.DefaultTransport) -} - -// defaultConfig returns the default configuration for the client, using the -// given function to make the transport. 
-func defaultConfig(transportFn func() *http.Transport) *Config { - config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - Transport: transportFn(), - } - - if addr := os.Getenv(HTTPAddrEnvName); addr != "" { - config.Address = addr - } - - if token := os.Getenv(HTTPTokenEnvName); token != "" { - config.Token = token - } - - if auth := os.Getenv(HTTPAuthEnvName); auth != "" { - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &HttpBasicAuth{ - Username: username, - Password: password, - } - } - - if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { - enabled, err := strconv.ParseBool(ssl) - if err != nil { - log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) - } - - if enabled { - config.Scheme = "https" - } - } - - if v := os.Getenv(HTTPTLSServerName); v != "" { - config.TLSConfig.Address = v - } - if v := os.Getenv(HTTPCAFile); v != "" { - config.TLSConfig.CAFile = v - } - if v := os.Getenv(HTTPCAPath); v != "" { - config.TLSConfig.CAPath = v - } - if v := os.Getenv(HTTPClientCert); v != "" { - config.TLSConfig.CertFile = v - } - if v := os.Getenv(HTTPClientKey); v != "" { - config.TLSConfig.KeyFile = v - } - if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { - doVerify, err := strconv.ParseBool(v) - if err != nil { - log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) - } - if !doVerify { - config.TLSConfig.InsecureSkipVerify = true - } - } - - return config -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. -func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { - tlsClientConfig := &tls.Config{ - InsecureSkipVerify: tlsConfig.InsecureSkipVerify, - } - - if tlsConfig.Address != "" { - server := tlsConfig.Address - hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") - if hasPort { - var err error - server, _, err = net.SplitHostPort(server) - if err != nil { - return nil, err - } - } - tlsClientConfig.ServerName = server - } - - if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) - if err != nil { - return nil, err - } - tlsClientConfig.Certificates = []tls.Certificate{tlsCert} - } - - if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" { - rootConfig := &rootcerts.Config{ - CAFile: tlsConfig.CAFile, - CAPath: tlsConfig.CAPath, - } - if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { - return nil, err - } - } - - return tlsClientConfig, nil -} - -// Client provides a client to the Consul API -type Client struct { - config Config -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if len(config.Address) == 0 { - config.Address = defConfig.Address - } - - if len(config.Scheme) == 0 { - config.Scheme = defConfig.Scheme - } - - if config.Transport == nil { - config.Transport = defConfig.Transport - } - - if config.TLSConfig.Address == "" { - config.TLSConfig.Address = defConfig.TLSConfig.Address - } - - if config.TLSConfig.CAFile == "" { - config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile - } - - if config.TLSConfig.CAPath == "" { - config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath - } - - if config.TLSConfig.CertFile == "" { - config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile 
- } - - if config.TLSConfig.KeyFile == "" { - config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile - } - - if !config.TLSConfig.InsecureSkipVerify { - config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify - } - - if config.HttpClient == nil { - var err error - config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) - if err != nil { - return nil, err - } - } - - parts := strings.SplitN(config.Address, "://", 2) - if len(parts) == 2 { - switch parts[0] { - case "http": - config.Scheme = "http" - case "https": - config.Scheme = "https" - case "unix": - trans := cleanhttp.DefaultTransport() - trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", parts[1]) - } - config.HttpClient = &http.Client{ - Transport: trans, - } - default: - return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) - } - config.Address = parts[1] - } - - if config.Token == "" { - config.Token = defConfig.Token - } - - return &Client{config: *config}, nil -} - -// NewHttpClient returns an http client configured with the given Transport and TLS -// config. -func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { - client := &http.Client{ - Transport: transport, - } - - // TODO (slackpad) - Once we get some run time on the HTTP/2 support we - // should turn it on by default if TLS is enabled. We would basically - // just need to call http2.ConfigureTransport(transport) here. We also - // don't want to introduce another external dependency on - // golang.org/x/net/http2 at this time. For a complete recipe for how - // to enable HTTP/2 support on a transport suitable for the API client - // library see agent/http_test.go:TestHTTPServer_H2. - - if transport.TLSClientConfig == nil { - tlsClientConfig, err := SetupTLSConfig(&tlsConf) - - if err != nil { - return nil, err - } - - transport.TLSClientConfig = tlsClientConfig - } - - return client, nil -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - body io.Reader - header http.Header - obj interface{} - ctx context.Context -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.RequireConsistent { - r.params.Set("consistent", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.Near != "" { - r.params.Set("near", q.Near) - } - if len(q.NodeMeta) > 0 { - for key, value := range q.NodeMeta { - r.params.Add("node-meta", key+":"+value) - } - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - r.ctx = q.ctx -} - -// durToMsec converts a duration to a millisecond specified string. If the -// user selected a positive value that rounds to 0 ms, then we will use 1 ms -// so they get a short delay, otherwise Consul will translate the 0 ms into -// a huge default delay. -func durToMsec(dur time.Duration) string { - ms := dur / time.Millisecond - if dur > 0 && ms == 0 { - ms = 1 - } - return fmt.Sprintf("%dms", ms) -} - -// serverError is a string we look for to detect 500 errors. 
-const serverError = "Unexpected response code: 500" - -// IsRetryableError returns true for 500 errors from the Consul servers, and -// network connection errors. These are usually retryable at a later time. -// This applies to reads but NOT to writes. This may return true for errors -// on writes that may have still gone through, so do not use this to retry -// any write operations. -func IsRetryableError(err error) bool { - if err == nil { - return false - } - - if _, ok := err.(net.Error); ok { - return true - } - - // TODO (slackpad) - Make a real error type here instead of using - // a string check. - return strings.Contains(err.Error(), serverError) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - r.ctx = q.ctx -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - b, err := encodeBody(r.obj) - if err != nil { - return nil, err - } - r.body = b - } - - // Create the HTTP request - req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - req.Header = r.header - - // Setup auth - if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - if r.ctx != nil { - return req.WithContext(r.ctx), nil - } - - return req, nil -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: c.config.Scheme, - Host: c.config.Address, - Path: path, - }, - params: make(map[string][]string), - header: make(http.Header), - } - if c.config.Datacenter != "" { - r.params.Set("dc", c.config.Datacenter) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.Token != "" { - r.header.Set("X-Consul-Token", r.config.Token) - } - return r -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { - req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - start := time.Now() - resp, err := c.config.HttpClient.Do(req) - diff := time.Since(start) - return diff, resp, err -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r := c.newRequest("GET", endpoint) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. 
-func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r := c.newRequest("PUT", endpoint) - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } else if _, err := ioutil.ReadAll(resp.Body); err != nil { - return nil, err - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Consul-Index - index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) - } - q.LastIndex = index - - // Parse the X-Consul-LastContact - last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) - } - q.LastContact = time.Duration(last) * time.Millisecond - - // Parse the X-Consul-KnownLeader - switch header.Get("X-Consul-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - - // Parse X-Consul-Translate-Addresses - switch header.Get("X-Consul-Translate-Addresses") { - case "true": - q.AddressTranslationEnabled = true - default: - q.AddressTranslationEnabled = false - } - - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// encodeBody is used to encode a request body -func encodeBody(obj interface{}) (io.Reader, error) { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) - } - return d, resp, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go deleted file mode 100644 index 80ce1bc815..0000000000 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ /dev/null @@ -1,200 +0,0 @@ -package api - -type Node struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - Meta map[string]string - CreateIndex uint64 - ModifyIndex uint64 -} - -type CatalogService struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - NodeMeta map[string]string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTags []string - ServiceMeta map[string]string - ServicePort int - ServiceEnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 -} - -type CatalogNode struct { - Node *Node - Services map[string]*AgentService -} - -type CatalogRegistration struct { - ID string - Node string - Address string - TaggedAddresses map[string]string - NodeMeta map[string]string - Datacenter string - Service *AgentService - Check *AgentCheck - 
SkipNodeUpdate bool -} - -type CatalogDeregistration struct { - Node string - Address string // Obsolete. - Datacenter string - ServiceID string - CheckID string -} - -// Catalog can be used to query the Catalog endpoints -type Catalog struct { - c *Client -} - -// Catalog returns a handle to the catalog endpoints -func (c *Client) Catalog() *Catalog { - return &Catalog{c} -} - -func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/register") - r.setWriteOptions(q) - r.obj = reg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/deregister") - r.setWriteOptions(q) - r.obj = dereg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Datacenters is used to query for all the known datacenters -func (c *Catalog) Datacenters() ([]string, error) { - r := c.c.newRequest("GET", "/v1/catalog/datacenters") - _, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []string - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to query all the known nodes -func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Node - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Services is used to query for all known services -func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/services") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]string - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query catalog entries for a given service -func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CatalogService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Node is used to query for service information about a single node -func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - 
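A minimal usage sketch for the catalog endpoints above, assuming the NewClient/DefaultConfig constructors from the portion of client.go that lies outside this hunk; the queried service name is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	catalog := client.Catalog()

	// List every known service and its tags.
	services, _, err := catalog.Services(nil)
	if err != nil {
		log.Fatal(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags)
	}

	// Look up the catalog entries for one service (no tag filter).
	entries, _, err := catalog.Service("consul", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Node, e.ServiceAddress, e.ServicePort)
	}
}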
qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out *CatalogNode
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
deleted file mode 100644
index 53318f11dd..0000000000
--- a/vendor/github.com/hashicorp/consul/api/coordinate.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package api
-
-import (
-	"github.com/hashicorp/serf/coordinate"
-)
-
-// CoordinateEntry represents a node and its associated network coordinate.
-type CoordinateEntry struct {
-	Node    string
-	Segment string
-	Coord   *coordinate.Coordinate
-}
-
-// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
-// and area. Network coordinates are only compatible within the same area.
-type CoordinateDatacenterMap struct {
-	Datacenter  string
-	AreaID      string
-	Coordinates []CoordinateEntry
-}
-
-// Coordinate can be used to query the coordinate endpoints
-type Coordinate struct {
-	c *Client
-}
-
-// Coordinate returns a handle to the coordinate endpoints
-func (c *Client) Coordinate() *Coordinate {
-	return &Coordinate{c}
-}
-
-// Datacenters is used to return the coordinates of all the servers in the WAN
-// pool.
-func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
-	r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
-	_, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out []*CoordinateDatacenterMap
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Nodes is used to return the coordinates of all the nodes in the LAN pool.
-func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
-	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*CoordinateEntry
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Update inserts or updates the LAN coordinate of a node.
-func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
-	r := c.c.newRequest("PUT", "/v1/coordinate/update")
-	r.setWriteOptions(q)
-	r.obj = coord
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	return wm, nil
-}
-
-// Node is used to return the coordinates of a single node in the LAN pool.
-func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
-	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*CoordinateEntry
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
deleted file mode 100644
index 85b5b069b0..0000000000
--- a/vendor/github.com/hashicorp/consul/api/event.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"strconv"
-)
-
-// Event can be used to query the Event endpoints
-type Event struct {
-	c *Client
-}
-
-// UserEvent represents an event that was fired by the user
-type UserEvent struct {
-	ID            string
-	Name          string
-	Payload       []byte
-	NodeFilter    string
-	ServiceFilter string
-	TagFilter     string
-	Version       int
-	LTime         uint64
-}
-
-// Event returns a handle to the event endpoints
-func (c *Client) Event() *Event {
-	return &Event{c}
-}
-
-// Fire is used to fire a new user event. Only the Name, Payload and Filters
-// are respected. This returns the ID or an associated error. Cross DC requests
-// are supported.
-func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
-	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
-	r.setWriteOptions(q)
-	if params.NodeFilter != "" {
-		r.params.Set("node", params.NodeFilter)
-	}
-	if params.ServiceFilter != "" {
-		r.params.Set("service", params.ServiceFilter)
-	}
-	if params.TagFilter != "" {
-		r.params.Set("tag", params.TagFilter)
-	}
-	if params.Payload != nil {
-		r.body = bytes.NewReader(params.Payload)
-	}
-
-	rtt, resp, err := requireOK(e.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out UserEvent
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// List is used to get the most recent events an agent has received.
-// This list can be optionally filtered by the name. This endpoint supports
-// quasi-blocking queries. The index is not monotonic, nor does it provide
-// LastContact or KnownLeader.
-func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
-	r := e.c.newRequest("GET", "/v1/event/list")
-	r.setQueryOptions(q)
-	if name != "" {
-		r.params.Set("name", name)
-	}
-	rtt, resp, err := requireOK(e.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*UserEvent
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// IDToIndex is a bit of a hack. This simulates the index generation to
-// convert an event ID into a WaitIndex.
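A hedged sketch of how Fire, List, and IDToIndex (whose body follows) compose; the event name and payload are invented, and QueryOptions.WaitIndex comes from the part of client.go outside this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func fireAndWatch(client *api.Client) {
	e := client.Event()

	// Fire a user event; only Name, Payload and the filters are respected.
	id, _, err := e.Fire(&api.UserEvent{
		Name:    "deploy",         // illustrative name
		Payload: []byte("v1.2.3"), // illustrative payload
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// IDToIndex turns the event ID into a WaitIndex so List can
	// quasi-block for events newer than the one we just fired.
	events, _, err := e.List("deploy", &api.QueryOptions{WaitIndex: e.IDToIndex(id)})
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range events {
		fmt.Println(ev.ID, ev.LTime)
	}
}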
-func (e *Event) IDToIndex(uuid string) uint64 { - lower := uuid[0:8] + uuid[9:13] + uuid[14:18] - upper := uuid[19:23] + uuid[24:36] - lowVal, err := strconv.ParseUint(lower, 16, 64) - if err != nil { - panic("Failed to convert " + lower) - } - highVal, err := strconv.ParseUint(upper, 16, 64) - if err != nil { - panic("Failed to convert " + upper) - } - return lowVal ^ highVal -} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go deleted file mode 100644 index 53f3de4f79..0000000000 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ /dev/null @@ -1,215 +0,0 @@ -package api - -import ( - "fmt" - "strings" -) - -const ( - // HealthAny is special, and is used as a wild card, - // not as a specific state. - HealthAny = "any" - HealthPassing = "passing" - HealthWarning = "warning" - HealthCritical = "critical" - HealthMaint = "maintenance" -) - -const ( - // NodeMaint is the special key set by a node in maintenance mode. - NodeMaint = "_node_maintenance" - - // ServiceMaintPrefix is the prefix for a service in maintenance mode. - ServiceMaintPrefix = "_service_maintenance:" -) - -// HealthCheck is used to represent a single check -type HealthCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string - ServiceTags []string - - Definition HealthCheckDefinition -} - -// HealthCheckDefinition is used to store the details about -// a health check's execution. -type HealthCheckDefinition struct { - HTTP string - Header map[string][]string - Method string - TLSSkipVerify bool - TCP string - Interval ReadableDuration - Timeout ReadableDuration - DeregisterCriticalServiceAfter ReadableDuration -} - -// HealthChecks is a collection of HealthCheck structs. -type HealthChecks []*HealthCheck - -// AggregatedStatus returns the "best" status for the list of health checks. 
-// Because a given entry may have many service and node-level health checks -// attached, this function determines the best representative of the status as -// as single string using the following heuristic: -// -// maintenance > critical > warning > passing -// -func (c HealthChecks) AggregatedStatus() string { - var passing, warning, critical, maintenance bool - for _, check := range c { - id := string(check.CheckID) - if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { - maintenance = true - continue - } - - switch check.Status { - case HealthPassing: - passing = true - case HealthWarning: - warning = true - case HealthCritical: - critical = true - default: - return "" - } - } - - switch { - case maintenance: - return HealthMaint - case critical: - return HealthCritical - case warning: - return HealthWarning - case passing: - return HealthPassing - default: - return HealthPassing - } -} - -// ServiceEntry is used for the health service endpoint -type ServiceEntry struct { - Node *Node - Service *AgentService - Checks HealthChecks -} - -// Health can be used to query the Health endpoints -type Health struct { - c *Client -} - -// Health returns a handle to the health endpoints -func (c *Client) Health() *Health { - return &Health{c} -} - -// Node is used to query for checks belonging to a given node -func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Checks is used to return the checks associated with a service -func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/checks/"+service) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query health information along with service info -// for a given service. It can optionally do server-side filtering on a tag -// or nodes with passing health checks only. -func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - if passingOnly { - r.params.Set(HealthPassing, "1") - } - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*ServiceEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// State is used to retrieve all the checks in a given state. -// The wildcard "any" state can also be used for all checks. 
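A minimal sketch of querying service health with the endpoints above and folding the checks with AggregatedStatus; the service name is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func printHealthy(client *api.Client) {
	// passingOnly=true returns only entries whose checks pass;
	// the empty tag means no tag filtering.
	entries, _, err := client.Health().Service("web", "", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		// AggregatedStatus collapses node- and service-level checks into
		// one value using maintenance > critical > warning > passing.
		fmt.Println(entry.Node.Node, entry.Checks.AggregatedStatus())
	}
}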
-func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - switch state { - case HealthAny: - case HealthWarning: - case HealthCritical: - case HealthPassing: - default: - return nil, nil, fmt.Errorf("Unsupported state: %v", state) - } - r := h.c.newRequest("GET", "/v1/health/state/"+state) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go deleted file mode 100644 index 97f5156855..0000000000 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ /dev/null @@ -1,420 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// KVPair is used to represent a single K/V entry -type KVPair struct { - // Key is the name of the key. It is also part of the URL path when accessed - // via the API. - Key string - - // CreateIndex holds the index corresponding the creation of this KVPair. This - // is a read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // LockIndex holds the index corresponding to a lock on this key, if any. This - // is a read-only field. - LockIndex uint64 - - // Flags are any user-defined flags on the key. It is up to the implementer - // to check these values, since Consul does not treat them specially. - Flags uint64 - - // Value is the value for the key. This can be any value, but it will be - // base64 encoded upon transport. - Value []byte - - // Session is a string representing the ID of the session. Any other - // interactions with this key over the same session must specify the same - // session ID. - Session string -} - -// KVPairs is a list of KVPair objects -type KVPairs []*KVPair - -// KVOp constants give possible operations available in a KVTxn. -type KVOp string - -const ( - KVSet KVOp = "set" - KVDelete KVOp = "delete" - KVDeleteCAS KVOp = "delete-cas" - KVDeleteTree KVOp = "delete-tree" - KVCAS KVOp = "cas" - KVLock KVOp = "lock" - KVUnlock KVOp = "unlock" - KVGet KVOp = "get" - KVGetTree KVOp = "get-tree" - KVCheckSession KVOp = "check-session" - KVCheckIndex KVOp = "check-index" - KVCheckNotExists KVOp = "check-not-exists" -) - -// KVTxnOp defines a single operation inside a transaction. -type KVTxnOp struct { - Verb KVOp - Key string - Value []byte - Flags uint64 - Index uint64 - Session string -} - -// KVTxnOps defines a set of operations to be performed inside a single -// transaction. -type KVTxnOps []*KVTxnOp - -// KVTxnResponse has the outcome of a transaction. -type KVTxnResponse struct { - Results []*KVPair - Errors TxnErrors -} - -// KV is used to manipulate the K/V API -type KV struct { - c *Client -} - -// KV is used to return a handle to the K/V apis -func (c *Client) KV() *KV { - return &KV{c} -} - -// Get is used to lookup a single key. The returned pointer -// to the KVPair will be nil if the key does not exist. 
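The ModifyIndex/WaitIndex relationship documented on KVPair is what enables blocking watches. A minimal sketch, assuming QueryOptions.WaitIndex from the part of client.go outside this hunk; the key is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

// watchKey long-polls a key: after the first pass, each Get blocks
// server-side until the key's index moves past the WaitIndex fed back
// from the previous response's QueryMeta.
func watchKey(kv *api.KV, key string) {
	var waitIndex uint64
	for {
		pair, meta, err := kv.Get(key, &api.QueryOptions{WaitIndex: waitIndex})
		if err != nil {
			log.Fatal(err)
		}
		if pair != nil {
			fmt.Printf("%s = %s\n", pair.Key, pair.Value)
		}
		waitIndex = meta.LastIndex
	}
}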
-func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { - resp, qm, err := k.getInternal(key, nil, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to lookup all keys under a prefix -func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { - resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Keys is used to list all the keys under a prefix. Optionally, -// a separator can be used to limit the responses. -func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { - params := map[string]string{"keys": ""} - if separator != "" { - params["separator"] = separator - } - resp, qm, err := k.getInternal(prefix, params, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []string - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setQueryOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - resp.Body.Close() - return nil, qm, nil - } else if resp.StatusCode != 200 { - resp.Body.Close() - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - return resp, qm, nil -} - -// Put is used to write a new value. Only the -// Key, Flags and Value is respected. -func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { - params := make(map[string]string, 1) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - _, wm, err := k.put(p.Key, params, p.Value, q) - return wm, err -} - -// CAS is used for a Check-And-Set operation. The Key, -// ModifyIndex, Flags and Value are respected. Returns true -// on success or false on failures. -func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) - return k.put(p.Key, params, p.Value, q) -} - -// Acquire is used for a lock acquisition operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["acquire"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -// Release is used for a lock release operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. 
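CAS above supports optimistic read-modify-write loops. A sketch with an invented counter key: the ModifyIndex from the read is echoed back, so a concurrent writer makes CAS return false and the loop retries:

package main

import (
	"log"
	"strconv"

	"github.com/hashicorp/consul/api"
)

func incrementCounter(kv *api.KV, key string) {
	for {
		pair, _, err := kv.Get(key, nil)
		if err != nil {
			log.Fatal(err)
		}
		next := &api.KVPair{Key: key, Value: []byte("1")}
		if pair != nil {
			n, _ := strconv.Atoi(string(pair.Value)) // treat garbage as 0
			next.Value = []byte(strconv.Itoa(n + 1))
			next.ModifyIndex = pair.ModifyIndex // check-and-set on what we read
		}
		// A zero ModifyIndex (key absent) makes this a create-only write.
		ok, _, err := kv.CAS(next, nil)
		if err != nil {
			log.Fatal(err)
		}
		if ok {
			return
		}
		// Lost the race; re-read and try again.
	}
}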
-func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["release"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { - if len(key) > 0 && key[0] == '/' { - return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) - } - - r := k.c.newRequest("PUT", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - r.body = bytes.NewReader(body) - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// Delete is used to delete a single key -func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(key, nil, w) - return qm, err -} - -// DeleteCAS is used for a Delete Check-And-Set operation. The Key -// and ModifyIndex are respected. Returns true on success or false on failures. -func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := map[string]string{ - "cas": strconv.FormatUint(p.ModifyIndex, 10), - } - return k.deleteInternal(p.Key, params, q) -} - -// DeleteTree is used to delete all keys under a prefix -func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) - return qm, err -} - -func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// TxnOp is the internal format we send to Consul. It's not specific to KV, -// though currently only KV operations are supported. -type TxnOp struct { - KV *KVTxnOp -} - -// TxnOps is a list of transaction operations. -type TxnOps []*TxnOp - -// TxnResult is the internal format we receive from Consul. -type TxnResult struct { - KV *KVPair -} - -// TxnResults is a list of TxnResult objects. -type TxnResults []*TxnResult - -// TxnError is used to return information about an operation in a transaction. -type TxnError struct { - OpIndex int - What string -} - -// TxnErrors is a list of TxnError objects. -type TxnErrors []*TxnError - -// TxnResponse is the internal format we receive from Consul. -type TxnResponse struct { - Results TxnResults - Errors TxnErrors -} - -// Txn is used to apply multiple KV operations in a single, atomic transaction. -// -// Note that Go will perform the required base64 encoding on the values -// automatically because the type is a byte slice. 
Transactions are defined as a -// list of operations to perform, using the KVOp constants and KVTxnOp structure -// to define operations. If any operation fails, none of the changes are applied -// to the state store. Note that this hides the internal raw transaction interface -// and munges the input and output types into KV-specific ones for ease of use. -// If there are more non-KV operations in the future we may break out a new -// transaction API client, but it will be easy to keep this KV-specific variant -// supported. -// -// Even though this is generally a write operation, we take a QueryOptions input -// and return a QueryMeta output. If the transaction contains only read ops, then -// Consul will fast-path it to a different endpoint internally which supports -// consistency controls, but not blocking. If there are write operations then -// the request will always be routed through raft and any consistency settings -// will be ignored. -// -// Here's an example: -// -// ops := KVTxnOps{ -// &KVTxnOp{ -// Verb: KVLock, -// Key: "test/lock", -// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", -// Value: []byte("hello"), -// }, -// &KVTxnOp{ -// Verb: KVGet, -// Key: "another/key", -// }, -// } -// ok, response, _, err := kv.Txn(&ops, nil) -// -// If there is a problem making the transaction request then an error will be -// returned. Otherwise, the ok value will be true if the transaction succeeded -// or false if it was rolled back. The response is a structured return value which -// will have the outcome of the transaction. Its Results member will have entries -// for each operation. Deleted keys will have a nil entry in the, and to save -// space, the Value of each key in the Results will be nil unless the operation -// is a KVGet. If the transaction was rolled back, the Errors member will have -// entries referencing the index of the operation that failed along with an error -// message. -func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { - r := k.c.newRequest("PUT", "/v1/txn") - r.setQueryOptions(q) - - // Convert into the internal format since this is an all-KV txn. - ops := make(TxnOps, 0, len(txn)) - for _, kvOp := range txn { - ops = append(ops, &TxnOp{KV: kvOp}) - } - r.obj = ops - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return false, nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { - var txnResp TxnResponse - if err := decodeBody(resp, &txnResp); err != nil { - return false, nil, nil, err - } - - // Convert from the internal format. 
- kvResp := KVTxnResponse{ - Errors: txnResp.Errors, - } - for _, result := range txnResp.Results { - kvResp.Results = append(kvResp.Results, result.KV) - } - return resp.StatusCode == http.StatusOK, &kvResp, qm, nil - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) -} diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go deleted file mode 100644 index 41f72e7d23..0000000000 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ /dev/null @@ -1,385 +0,0 @@ -package api - -import ( - "fmt" - "sync" - "time" -) - -const ( - // DefaultLockSessionName is the Session Name we assign if none is provided - DefaultLockSessionName = "Consul API Lock" - - // DefaultLockSessionTTL is the default session TTL if no Session is provided - // when creating a new Lock. This is used because we do not have another - // other check to depend upon. - DefaultLockSessionTTL = "15s" - - // DefaultLockWaitTime is how long we block for at a time to check if lock - // acquisition is possible. This affects the minimum time it takes to cancel - // a Lock acquisition. - DefaultLockWaitTime = 15 * time.Second - - // DefaultLockRetryTime is how long we wait after a failed lock acquisition - // before attempting to do the lock again. This is so that once a lock-delay - // is in effect, we do not hot loop retrying the acquisition. - DefaultLockRetryTime = 5 * time.Second - - // DefaultMonitorRetryTime is how long we wait after a failed monitor check - // of a lock (500 response code). This allows the monitor to ride out brief - // periods of unavailability, subject to the MonitorRetries setting in the - // lock options which is by default set to 0, disabling this feature. This - // affects locks and semaphores. - DefaultMonitorRetryTime = 2 * time.Second - - // LockFlagValue is a magic flag we set to indicate a key - // is being used for a lock. It is used to detect a potential - // conflict with a semaphore. - LockFlagValue = 0x2ddccbc058a50c18 -) - -var ( - // ErrLockHeld is returned if we attempt to double lock - ErrLockHeld = fmt.Errorf("Lock already held") - - // ErrLockNotHeld is returned if we attempt to unlock a lock - // that we do not hold. - ErrLockNotHeld = fmt.Errorf("Lock not held") - - // ErrLockInUse is returned if we attempt to destroy a lock - // that is in use. - ErrLockInUse = fmt.Errorf("Lock in use") - - // ErrLockConflict is returned if the flags on a key - // used for a lock do not match expectation - ErrLockConflict = fmt.Errorf("Existing key does not match lock use") -) - -// Lock is used to implement client-side leader election. It is follows the -// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html. -type Lock struct { - c *Client - opts *LockOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// LockOptions is used to parameterize the Lock behavior. 
-type LockOptions struct { - Key string // Must be set and have write permissions - Value []byte // Optional, value to associate with the lock - Session string // Optional, created if not specified - SessionOpts *SessionEntry // Optional, options to use when creating a session - SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) - SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime - LockTryOnce bool // Optional, defaults to false which means try forever -} - -// LockKey returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockKey(key string) (*Lock, error) { - opts := &LockOptions{ - Key: key, - } - return c.LockOpts(opts) -} - -// LockOpts returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { - if opts.Key == "" { - return nil, fmt.Errorf("missing key") - } - if opts.SessionName == "" { - opts.SessionName = DefaultLockSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultLockSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.LockWaitTime == 0 { - opts.LockWaitTime = DefaultLockWaitTime - } - l := &Lock{ - c: c, - opts: opts, - } - return l, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Providing a non-nil stopCh can be used to abort the lock attempt. -// Returns a channel that is closed if our lock is lost or an error. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the lock is held until Unlock() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the lock being lost. -func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return nil, ErrLockHeld - } - - // Check if we need to create a session first - l.lockSession = l.opts.Session - if l.lockSession == "" { - s, err := l.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - l.sessionRenew = make(chan struct{}) - l.lockSession = s - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() - } - - // Setup the query options - kv := l.c.KV() - qOpts := &QueryOptions{ - WaitTime: l.opts.LockWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. 
- if l.opts.LockTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > qOpts.WaitTime { - return nil, nil - } - - qOpts.WaitTime -= elapsed - } - attempts++ - - // Look for an existing lock, blocking until not taken - pair, meta, err := kv.Get(l.opts.Key, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read lock: %v", err) - } - if pair != nil && pair.Flags != LockFlagValue { - return nil, ErrLockConflict - } - locked := false - if pair != nil && pair.Session == l.lockSession { - goto HELD - } - if pair != nil && pair.Session != "" { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Try to acquire the lock - pair = l.lockEntry(l.lockSession) - locked, _, err = kv.Acquire(pair, nil) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock: %v", err) - } - - // Handle the case of not getting the lock - if !locked { - // Determine why the lock failed - qOpts.WaitIndex = 0 - pair, meta, err = kv.Get(l.opts.Key, qOpts) - if pair != nil && pair.Session != "" { - //If the session is not null, this means that a wait can safely happen - //using a long poll - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } else { - // If the session is empty and the lock failed to acquire, then it means - // a lock-delay is in effect and a timed wait must be used - select { - case <-time.After(DefaultLockRetryTime): - goto WAIT - case <-stopCh: - return nil, nil - } - } - } - -HELD: - // Watch to ensure we maintain leadership - leaderCh := make(chan struct{}) - go l.monitorLock(l.lockSession, leaderCh) - - // Set that we own the lock - l.isHeld = true - - // Locked! All done - return leaderCh, nil -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. -func (l *Lock) Unlock() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Ensure the lock is actually held - if !l.isHeld { - return ErrLockNotHeld - } - - // Set that we no longer own the lock - l.isHeld = false - - // Stop the session renew - if l.sessionRenew != nil { - defer func() { - close(l.sessionRenew) - l.sessionRenew = nil - }() - } - - // Get the lock entry, and clear the lock session - lockEnt := l.lockEntry(l.lockSession) - l.lockSession = "" - - // Release the lock explicitly - kv := l.c.KV() - _, _, err := kv.Release(lockEnt, nil) - if err != nil { - return fmt.Errorf("failed to release lock: %v", err) - } - return nil -} - -// Destroy is used to cleanup the lock entry. It is not necessary -// to invoke. It will fail if the lock is in use. 
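Putting Lock and Unlock together for the leader-election use case named above; a sketch with an invented key path. Note that Lock returns a nil channel when aborted via stopCh, and a close of the returned channel means leadership must be abandoned:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func runAsLeader(client *api.Client, doWork func(lost <-chan struct{})) {
	lock, err := client.LockKey("service/my-app/leader") // illustrative key
	if err != nil {
		log.Fatal(err)
	}

	stopCh := make(chan struct{})
	lostCh, err := lock.Lock(stopCh)
	if err != nil {
		log.Fatal(err)
	}
	if lostCh == nil {
		return // acquisition was aborted via stopCh
	}
	defer lock.Unlock()

	// Work only while lostCh stays open; it closes if the session is
	// invalidated, so the application must tolerate losing the lock.
	doWork(lostCh)
}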
-func (l *Lock) Destroy() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return ErrLockHeld - } - - // Look for an existing lock - kv := l.c.KV() - pair, _, err := kv.Get(l.opts.Key, nil) - if err != nil { - return fmt.Errorf("failed to read lock: %v", err) - } - - // Nothing to do if the lock does not exist - if pair == nil { - return nil - } - - // Check for possible flag conflict - if pair.Flags != LockFlagValue { - return ErrLockConflict - } - - // Check if it is in use - if pair.Session != "" { - return ErrLockInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(pair, nil) - if err != nil { - return fmt.Errorf("failed to remove lock: %v", err) - } - if !didRemove { - return ErrLockInUse - } - return nil -} - -// createSession is used to create a new managed session -func (l *Lock) createSession() (string, error) { - session := l.c.Session() - se := l.opts.SessionOpts - if se == nil { - se = &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, - } - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// lockEntry returns a formatted KVPair for the lock -func (l *Lock) lockEntry(session string) *KVPair { - return &KVPair{ - Key: l.opts.Key, - Value: l.opts.Value, - Session: session, - Flags: LockFlagValue, - } -} - -// monitorLock is a long running routine to monitor a lock ownership -// It closes the stopCh if we lose our leadership. -func (l *Lock) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := l.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := l.opts.MonitorRetries -RETRY: - pair, meta, err := kv.Get(l.opts.Key, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(l.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - if pair != nil && pair.Session == session { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go deleted file mode 100644 index 079e224866..0000000000 --- a/vendor/github.com/hashicorp/consul/api/operator.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// Operator can be used to perform low-level operator tasks for Consul. -type Operator struct { - c *Client -} - -// Operator returns a handle to the operator endpoints. -func (c *Client) Operator() *Operator { - return &Operator{c} -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go deleted file mode 100644 index a630b694cd..0000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ /dev/null @@ -1,193 +0,0 @@ -// The /v1/operator/area endpoints are available only in Consul Enterprise and -// interact with its network area subsystem. Network areas are used to link -// together Consul servers in different Consul datacenters. With network areas, -// Consul datacenters can be linked together in ways other than a fully-connected -// mesh, as is required for Consul's WAN. -package api - -import ( - "net" - "time" -) - -// Area defines a network area. 
-type Area struct { - // ID is this identifier for an area (a UUID). This must be left empty - // when creating a new area. - ID string - - // PeerDatacenter is the peer Consul datacenter that will make up the - // other side of this network area. Network areas always involve a pair - // of datacenters: the datacenter where the area was created, and the - // peer datacenter. This is required. - PeerDatacenter string - - // RetryJoin specifies the address of Consul servers to join to, such as - // an IPs or hostnames with an optional port number. This is optional. - RetryJoin []string - - // UseTLS specifies whether gossip over this area should be encrypted with TLS - // if possible. - UseTLS bool -} - -// AreaJoinResponse is returned when a join occurs and gives the result for each -// address. -type AreaJoinResponse struct { - // The address that was joined. - Address string - - // Whether or not the join was a success. - Joined bool - - // If we couldn't join, this is the message with information. - Error string -} - -// SerfMember is a generic structure for reporting information about members in -// a Serf cluster. This is only used by the area endpoints right now, but this -// could be expanded to other endpoints in the future. -type SerfMember struct { - // ID is the node identifier (a UUID). - ID string - - // Name is the node name. - Name string - - // Addr has the IP address. - Addr net.IP - - // Port is the RPC port. - Port uint16 - - // Datacenter is the DC name. - Datacenter string - - // Role is "client", "server", or "unknown". - Role string - - // Build has the version of the Consul agent. - Build string - - // Protocol is the protocol of the Consul agent. - Protocol int - - // Status is the Serf health status "none", "alive", "leaving", "left", - // or "failed". - Status string - - // RTT is the estimated round trip time from the server handling the - // request to the this member. This will be negative if no RTT estimate - // is available. - RTT time.Duration -} - -// AreaCreate will create a new network area. The ID in the given structure must -// be empty and a generated ID will be returned on success. -func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("POST", "/v1/operator/area") - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaUpdate will update the configuration of the network area with the given ID. -func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaGet returns a single network area. 
-func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaList returns all the available network areas. -func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaDelete deletes the given network area. -func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { - r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// AreaJoin attempts to join the given set of join addresses to the given -// network area. See the Area structure for details about join addresses. -func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") - r.setWriteOptions(q) - r.obj = addresses - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out []*AreaJoinResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, wm, nil -} - -// AreaMembers lists the Serf information about the members in the given area. -func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { - var out []*SerfMember - qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go deleted file mode 100644 index b179406dc1..0000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ /dev/null @@ -1,219 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// AutopilotConfiguration is used for querying/setting the Autopilot configuration. -// Autopilot helps manage operator tasks related to Consul servers like removing -// failed servers from the Raft quorum. -type AutopilotConfiguration struct { - // CleanupDeadServers controls whether to remove dead servers from the Raft - // peer list when a new server joins - CleanupDeadServers bool - - // LastContactThreshold is the limit on the amount of time a server can go - // without leader contact before being considered unhealthy. - LastContactThreshold *ReadableDuration - - // MaxTrailingLogs is the amount of entries in the Raft Log that a server can - // be behind before being considered unhealthy. - MaxTrailingLogs uint64 - - // ServerStabilizationTime is the minimum amount of time a server must be - // in a stable, healthy state before it can be added to the cluster. Only - // applicable with Raft protocol version 3 or higher. - ServerStabilizationTime *ReadableDuration - - // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating - // servers into zones for redundancy. If left blank, this feature will be disabled. 
- RedundancyZoneTag string - - // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration - // strategy of waiting until enough newer-versioned servers have been added to the - // cluster before promoting them to voters. - DisableUpgradeMigration bool - - // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when - // performing upgrade migrations. If left blank, the Consul version will be used. - UpgradeVersionTag string - - // CreateIndex holds the index corresponding the creation of this configuration. - // This is a read-only field. - CreateIndex uint64 - - // ModifyIndex will be set to the index of the last update when retrieving the - // Autopilot configuration. Resubmitting a configuration with - // AutopilotCASConfiguration will perform a check-and-set operation which ensures - // there hasn't been a subsequent update since the configuration was retrieved. - ModifyIndex uint64 -} - -// ServerHealth is the health (from the leader's point of view) of a server. -type ServerHealth struct { - // ID is the raft ID of the server. - ID string - - // Name is the node name of the server. - Name string - - // Address is the address of the server. - Address string - - // The status of the SerfHealth check for the server. - SerfStatus string - - // Version is the Consul version of the server. - Version string - - // Leader is whether this server is currently the leader. - Leader bool - - // LastContact is the time since this node's last contact with the leader. - LastContact *ReadableDuration - - // LastTerm is the highest leader term this server has a record of in its Raft log. - LastTerm uint64 - - // LastIndex is the last log index this server has a record of in its Raft log. - LastIndex uint64 - - // Healthy is whether or not the server is healthy according to the current - // Autopilot config. - Healthy bool - - // Voter is whether this is a voting server. - Voter bool - - // StableSince is the last time this server's Healthy value changed. - StableSince time.Time -} - -// OperatorHealthReply is a representation of the overall health of the cluster -type OperatorHealthReply struct { - // Healthy is true if all the servers in the cluster are healthy. - Healthy bool - - // FailureTolerance is the number of healthy servers that could be lost without - // an outage occurring. - FailureTolerance int - - // Servers holds the health of each server. - Servers []ServerHealth -} - -// ReadableDuration is a duration type that is serialized to JSON in human readable format. 
-type ReadableDuration time.Duration - -func NewReadableDuration(dur time.Duration) *ReadableDuration { - d := ReadableDuration(dur) - return &d -} - -func (d *ReadableDuration) String() string { - return d.Duration().String() -} - -func (d *ReadableDuration) Duration() time.Duration { - if d == nil { - return time.Duration(0) - } - return time.Duration(*d) -} - -func (d *ReadableDuration) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil -} - -func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { - if d == nil { - return fmt.Errorf("cannot unmarshal to nil pointer") - } - - str := string(raw) - if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { - return fmt.Errorf("must be enclosed with quotes: %s", str) - } - dur, err := time.ParseDuration(str[1 : len(str)-1]) - if err != nil { - return err - } - *d = ReadableDuration(dur) - return nil -} - -// AutopilotGetConfiguration is used to query the current Autopilot configuration. -func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out AutopilotConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return &out, nil -} - -// AutopilotSetConfiguration is used to set the current Autopilot configuration. -func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// AutopilotCASConfiguration is used to perform a Check-And-Set update on the -// Autopilot configuration. The ModifyIndex value will be respected. Returns -// true on success or false on failures. 
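A sketch of the get/modify/check-and-set cycle that the comment above describes, also showing ReadableDuration construction; the threshold value is illustrative:

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func tightenAutopilot(client *api.Client) {
	op := client.Operator()

	conf, err := op.AutopilotGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	conf.LastContactThreshold = api.NewReadableDuration(500 * time.Millisecond)

	// CAS submits conf.ModifyIndex, so a concurrent update since our read
	// makes this return false instead of clobbering it.
	ok, err := op.AutopilotCASConfiguration(conf, nil)
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		log.Println("configuration changed underneath us; retry")
	}
}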
-func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10)) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - - return res, nil -} - -// AutopilotServerHealth -func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/health") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out OperatorHealthReply - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go deleted file mode 100644 index 6b614296ce..0000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go +++ /dev/null @@ -1,86 +0,0 @@ -package api - -// keyringRequest is used for performing Keyring operations -type keyringRequest struct { - Key string -} - -// KeyringResponse is returned when listing the gossip encryption keys -type KeyringResponse struct { - // Whether this response is for a WAN ring - WAN bool - - // The datacenter name this request corresponds to - Datacenter string - - // Segment has the network segment this request corresponds to. - Segment string - - // A map of the encryption keys to the number of nodes they're installed on - Keys map[string]int - - // The total number of nodes in this ring - NumNodes int -} - -// KeyringInstall is used to install a new gossip encryption key into the cluster -func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { - r := op.c.newRequest("POST", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringList is used to list the gossip keys installed in the cluster -func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { - r := op.c.newRequest("GET", "/v1/operator/keyring") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*KeyringResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// KeyringRemove is used to remove a gossip encryption key from the cluster -func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringUse is used to change the active gossip encryption key -func (op *Operator) KeyringUse(key string, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - 
resp.Body.Close()
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
deleted file mode 100644
index a9844df2dd..0000000000
--- a/vendor/github.com/hashicorp/consul/api/operator_raft.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package api
-
-// RaftServer has information about a server in the Raft configuration.
-type RaftServer struct {
-	// ID is the unique ID for the server. These are currently the same
-	// as the address, but they will be changed to a real GUID in a future
-	// release of Consul.
-	ID string
-
-	// Node is the node name of the server, as known by Consul, or this
-	// will be set to "(unknown)" otherwise.
-	Node string
-
-	// Address is the IP:port of the server, used for Raft communications.
-	Address string
-
-	// Leader is true if this server is the current cluster leader.
-	Leader bool
-
-	// ProtocolVersion is the raft protocol version used by the server.
-	ProtocolVersion string
-
-	// Voter is true if this server has a vote in the cluster. This might
-	// be false if the server is staging and still coming online, or if
-	// it's a non-voting server, which will be added in a future release of
-	// Consul.
-	Voter bool
-}
-
-// RaftConfiguration is returned when querying for the current Raft configuration.
-type RaftConfiguration struct {
-	// Servers has the list of servers in the Raft configuration.
-	Servers []*RaftServer
-
-	// Index has the Raft index of this configuration.
-	Index uint64
-}
-
-// RaftGetConfiguration is used to query the current Raft peer set.
-func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
-	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
-	r.setQueryOptions(q)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out RaftConfiguration
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return &out, nil
-}
-
-// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
-// quorum but no longer known to Serf or the catalog) by address in the form of
-// "IP:port".
-func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
-	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
-	r.setWriteOptions(q)
-
-	r.params.Set("address", string(address))
-
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-
-	resp.Body.Close()
-	return nil
-}
-
-// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
-// quorum but no longer known to Serf or the catalog) by ID.
-func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
-	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
-	r.setWriteOptions(q)
-
-	r.params.Set("id", string(id))
-
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-
-	resp.Body.Close()
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
deleted file mode 100644
index 92b05d3c03..0000000000
--- a/vendor/github.com/hashicorp/consul/api/operator_segment.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package api
-
-// SegmentList returns all the available LAN segments.
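A sketch of inspecting the Raft peer set via the operator endpoints above; the peer address in the commented removal call is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func showRaftPeers(client *api.Client) {
	op := client.Operator()

	raft, err := op.RaftGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range raft.Servers {
		fmt.Println(s.Node, s.Address, "leader:", s.Leader, "voter:", s.Voter)
	}

	// Kicking a stale peer is a separate, destructive call:
	// err = op.RaftRemovePeerByAddress("10.0.0.9:8300", nil) // placeholder address
}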
-func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
-	var out []string
-	qm, err := op.c.query("/v1/operator/segment", &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
deleted file mode 100644
index d322dd8679..0000000000
--- a/vendor/github.com/hashicorp/consul/api/prepared_query.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package api
-
-// QueryDatacenterOptions sets options about how we fail over if there are no
-// healthy nodes in the local datacenter.
-type QueryDatacenterOptions struct {
-	// NearestN is set to the number of remote datacenters to try, based on
-	// network coordinates.
-	NearestN int
-
-	// Datacenters is a fixed list of datacenters to try after NearestN. We
-	// never try a datacenter multiple times, so those are subtracted from
-	// this list before proceeding.
-	Datacenters []string
-}
-
-// QueryDNSOptions controls settings when query results are served over DNS.
-type QueryDNSOptions struct {
-	// TTL is the time to live for the served DNS results.
-	TTL string
-}
-
-// ServiceQuery is used to query for a set of healthy nodes offering a specific
-// service.
-type ServiceQuery struct {
-	// Service is the service to query.
-	Service string
-
-	// Near allows baking in the name of a node to automatically distance-
-	// sort from. The magic "_agent" value is supported, which sorts near
-	// the agent which initiated the request by default.
-	Near string
-
-	// Failover controls what we do if there are no healthy nodes in the
-	// local datacenter.
-	Failover QueryDatacenterOptions
-
-	// IgnoreCheckIDs is an optional list of health check IDs to ignore when
-	// considering which nodes are healthy. It is useful as an emergency measure
-	// to temporarily override a health check that is producing false negatives,
-	// for example.
-	IgnoreCheckIDs []string
-
-	// If OnlyPassing is true then we will only include nodes with passing
-	// health checks (critical AND warning checks will cause a node to be
-	// discarded)
-	OnlyPassing bool
-
-	// Tags are a set of required and/or disallowed tags. If a tag is in
-	// this list it must be present. If the tag is preceded with "!" then
-	// it is disallowed.
-	Tags []string
-
-	// NodeMeta is a map of required node metadata fields. If a key/value
-	// pair is in this map it must be present on the node in order for the
-	// service entry to be returned.
-	NodeMeta map[string]string
-}
-
-// QueryTemplate carries the arguments for creating a templated query.
-type QueryTemplate struct {
-	// Type specifies the type of the query template. Currently only
-	// "name_prefix_match" is supported. This field is required.
-	Type string
-
-	// Regexp allows specifying a regex pattern to match against the name
-	// of the query being executed.
-	Regexp string
-}
-
-// PreparedQueryDefinition defines a complete prepared query.
-type PreparedQueryDefinition struct {
-	// ID is the UUID-based ID for the query, always generated by Consul.
-	ID string
-
-	// Name is an optional friendly name for the query supplied by the
-	// user. NOTE - if this feature is used then it will reduce the security
-	// of any read ACL associated with this query/service since this name
-	// can be used to locate nodes without supplying any ACL.
-	Name string
-
-	// Session is an optional session to tie this query's lifetime to. If
-	// this is omitted then the query will not expire.
- Session string - - // Token is the ACL token used when the query was created, and it is - // used when a query is subsequently executed. This token, or a token - // with management privileges, must be used to change the query later. - Token string - - // Service defines a service query (leaving things open for other types - // later). - Service ServiceQuery - - // DNS has options that control how the results of this query are - // served over DNS. - DNS QueryDNSOptions - - // Template is used to pass through the arguments for creating a - // prepared query with an attached template. If a template is given, - // interpolations are possible in other struct fields. - Template QueryTemplate -} - -// PreparedQueryExecuteResponse has the results of executing a query. -type PreparedQueryExecuteResponse struct { - // Service is the service that was queried. - Service string - - // Nodes has the nodes that were output by the query. - Nodes []ServiceEntry - - // DNS has the options for serving these results over DNS. - DNS QueryDNSOptions - - // Datacenter is the datacenter that these results came from. - Datacenter string - - // Failovers is a count of how many times we had to query a remote - // datacenter. - Failovers int -} - -// PreparedQuery can be used to query the prepared query endpoints. -type PreparedQuery struct { - c *Client -} - -// PreparedQuery returns a handle to the prepared query endpoints. -func (c *Client) PreparedQuery() *PreparedQuery { - return &PreparedQuery{c} -} - -// Create makes a new prepared query. The ID of the new query is returned. -func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { - r := c.c.newRequest("POST", "/v1/query") - r.setWriteOptions(q) - r.obj = query - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update makes updates to an existing prepared query. -func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { - return c.c.write("/v1/query/"+query.ID, query, nil, q) -} - -// List is used to fetch all the prepared queries (always requires a management -// token). -func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Get is used to fetch a specific prepared query. -func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query/"+queryID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Delete is used to delete a specific prepared query. -func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("DELETE", "/v1/query/"+queryID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// Execute is used to execute a specific prepared query. You can execute using -// a query ID or name. 
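Editor's note: the prepared-query types and endpoints above combine as follows. A minimal sketch of registering and running a query, assuming a configured *consul.Client; the "web" service name is illustrative, Execute is the method whose implementation follows directly below, and ServiceEntry comes from the health API (removed elsewhere in this patch):

```go
package main

import (
	"fmt"

	consul "github.com/hashicorp/consul/api"
)

// createAndRun registers a prepared query for the (illustrative) "web"
// service and immediately executes it by the returned ID.
func createAndRun(client *consul.Client) error {
	pq := client.PreparedQuery()

	def := &consul.PreparedQueryDefinition{
		Name: "web-nearest",
		Service: consul.ServiceQuery{
			Service:     "web",
			Near:        "_agent", // distance-sort from the calling agent
			OnlyPassing: true,     // drop nodes with warning/critical checks
			Failover: consul.QueryDatacenterOptions{
				NearestN: 2, // fail over to the two nearest remote DCs
			},
		},
		DNS: consul.QueryDNSOptions{TTL: "10s"},
	}

	id, _, err := pq.Create(def, nil)
	if err != nil {
		return err
	}

	// Execute by ID (executing by Name works the same way).
	res, _, err := pq.Execute(id, nil)
	if err != nil {
		return err
	}
	for _, entry := range res.Nodes {
		fmt.Println(entry.Node.Node, entry.Service.Port)
	}
	return nil
}
```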
-func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
-	var out *PreparedQueryExecuteResponse
-	qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
deleted file mode 100644
index 745a208c99..0000000000
--- a/vendor/github.com/hashicorp/consul/api/raw.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package api
-
-// Raw can be used to do raw queries against custom endpoints
-type Raw struct {
-	c *Client
-}
-
-// Raw returns a handle to query endpoints
-func (c *Client) Raw() *Raw {
-	return &Raw{c}
-}
-
-// Query is used to do a GET request against an endpoint
-// and deserialize the response into an interface using
-// standard Consul conventions.
-func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
-	return raw.c.query(endpoint, out, q)
-}
-
-// Write is used to do a PUT request against an endpoint
-// and serialize/deserialize using the standard Consul conventions.
-func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
-	return raw.c.write(endpoint, in, out, q)
-}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
deleted file mode 100644
index d0c5741778..0000000000
--- a/vendor/github.com/hashicorp/consul/api/semaphore.go
+++ /dev/null
@@ -1,513 +0,0 @@
-package api
-
-import (
-	"encoding/json"
-	"fmt"
-	"path"
-	"sync"
-	"time"
-)
-
-const (
-	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
-	DefaultSemaphoreSessionName = "Consul API Semaphore"
-
-	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
-	// when creating a new Semaphore. This is used because we do not have any
-	// other check to depend upon.
-	DefaultSemaphoreSessionTTL = "15s"
-
-	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
-	// acquisition is possible. This affects the minimum time it takes to cancel
-	// a Semaphore acquisition.
-	DefaultSemaphoreWaitTime = 15 * time.Second
-
-	// DefaultSemaphoreKey is the key used within the prefix to
-	// use for coordination between all the contenders.
-	DefaultSemaphoreKey = ".lock"
-
-	// SemaphoreFlagValue is a magic flag we set to indicate a key
-	// is being used for a semaphore. It is used to detect a potential
-	// conflict with a lock.
-	SemaphoreFlagValue = 0xe0f69a2baa414de0
-)
-
-var (
-	// ErrSemaphoreHeld is returned if we attempt to double lock
-	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
-
-	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
-	// that we do not hold.
-	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
-
-	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
-	// that is in use.
-	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
-
-	// ErrSemaphoreConflict is returned if the flags on a key
-	// used for a semaphore do not match expectation
-	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
-)
-
-// Semaphore is used to implement a distributed semaphore
-// using the Consul KV primitives.
-type Semaphore struct {
-	c    *Client
-	opts *SemaphoreOptions
-
-	isHeld       bool
-	sessionRenew chan struct{}
-	lockSession  string
-	l            sync.Mutex
-}
-
-// SemaphoreOptions is used to parameterize the Semaphore
-type SemaphoreOptions struct {
-	Prefix            string        // Must be set and have write permissions
-	Limit             int           // Must be set, and be positive
-	Value             []byte        // Optional, value to associate with the contender entry
-	Session           string        // Optional, created if not specified
-	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
-	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
-	MonitorRetries    int           // Optional, defaults to 0 which means no retries
-	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
-	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
-	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
-}
-
-// semaphoreLock is written under the DefaultSemaphoreKey and
-// is used to coordinate between all the contenders.
-type semaphoreLock struct {
-	// Limit is the integer limit of holders. This is used to
-	// verify that all the holders agree on the value.
-	Limit int
-
-	// Holders is a list of all the semaphore holders.
-	// It maps the session ID to true. It is used as a set effectively.
-	Holders map[string]bool
-}
-
-// SemaphorePrefix is used to create a Semaphore which will operate
-// at the given KV prefix and use the given limit for the semaphore.
-// The prefix must have write privileges, and the limit must be agreed
-// upon by all contenders.
-func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
-	opts := &SemaphoreOptions{
-		Prefix: prefix,
-		Limit:  limit,
-	}
-	return c.SemaphoreOpts(opts)
-}
-
-// SemaphoreOpts is used to create a Semaphore with the given options.
-// The prefix must have write privileges, and the limit must be agreed
-// upon by all contenders. If a Session is not provided, one will be created.
-func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
-	if opts.Prefix == "" {
-		return nil, fmt.Errorf("missing prefix")
-	}
-	if opts.Limit <= 0 {
-		return nil, fmt.Errorf("semaphore limit must be positive")
-	}
-	if opts.SessionName == "" {
-		opts.SessionName = DefaultSemaphoreSessionName
-	}
-	if opts.SessionTTL == "" {
-		opts.SessionTTL = DefaultSemaphoreSessionTTL
-	} else {
-		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
-			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
-		}
-	}
-	if opts.MonitorRetryTime == 0 {
-		opts.MonitorRetryTime = DefaultMonitorRetryTime
-	}
-	if opts.SemaphoreWaitTime == 0 {
-		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
-	}
-	s := &Semaphore{
-		c:    c,
-		opts: opts,
-	}
-	return s, nil
-}
-
-// Acquire attempts to reserve a slot in the semaphore, blocking until
-// success, interrupted via the stopCh or an error is encountered.
-// Providing a non-nil stopCh can be used to abort the attempt.
-// On success, a channel is returned that represents our slot.
-// This channel could be closed at any time due to session invalidation,
-// communication errors, operator intervention, etc. It is NOT safe to
-// assume that the slot is held until Release() unless the Session is specifically
-// created without any associated health checks. By default Consul sessions
-// prefer liveness over safety and an application must be able to handle
-// the session being lost.
-func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return nil, ErrSemaphoreHeld - } - - // Check if we need to create a session first - s.lockSession = s.opts.Session - if s.lockSession == "" { - sess, err := s.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() - } - - // Create the contender entry - kv := s.c.KV() - made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) - if err != nil || !made { - return nil, fmt.Errorf("failed to make contender entry: %v", err) - } - - // Setup the query options - qOpts := &QueryOptions{ - WaitTime: s.opts.SemaphoreWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. - if s.opts.SemaphoreTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > qOpts.WaitTime { - return nil, nil - } - - qOpts.WaitTime -= elapsed - } - attempts++ - - // Read the prefix - pairs, meta, err := kv.List(s.opts.Prefix, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read prefix: %v", err) - } - - // Decode the lock - lockPair := s.findLock(pairs) - if lockPair.Flags != SemaphoreFlagValue { - return nil, ErrSemaphoreConflict - } - lock, err := s.decodeLock(lockPair) - if err != nil { - return nil, err - } - - // Verify we agree with the limit - if lock.Limit != s.opts.Limit { - return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", - lock.Limit, s.opts.Limit) - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if the lock is held - if len(lock.Holders) >= lock.Limit { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Create a new lock with us as a holder - lock.Holders[s.lockSession] = true - newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) - if err != nil { - return nil, err - } - - // Attempt the acquisition - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return nil, fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - // Update failed, could have been a race with another contender, - // retry the operation - goto WAIT - } - - // Watch to ensure we maintain ownership of the slot - lockCh := make(chan struct{}) - go s.monitorLock(s.lockSession, lockCh) - - // Set that we own the lock - s.isHeld = true - - // Acquired! All done - return lockCh, nil -} - -// Release is used to voluntarily give up our semaphore slot. It is -// an error to call this if the semaphore has not been acquired. 
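Editor's note: putting the semaphore pieces above together, a minimal sketch of contending for one of three slots and reacting to a lost slot. The KV prefix is illustrative, the time.After call stands in for real work, and Release is the method defined immediately below:

```go
package main

import (
	"log"
	"time"

	consul "github.com/hashicorp/consul/api"
)

// holdOneOfThree contends for one of three slots under an illustrative
// KV prefix, does some work, and gives the slot back.
func holdOneOfThree(client *consul.Client) error {
	sem, err := client.SemaphorePrefix("service/web/semaphore", 3)
	if err != nil {
		return err
	}

	// With a nil stopCh this blocks until a slot is free. The returned
	// channel is closed if the slot is ever lost (session invalidation,
	// operator intervention, ...).
	lostCh, err := sem.Acquire(nil)
	if err != nil {
		return err
	}
	defer sem.Release()

	select {
	case <-lostCh:
		log.Println("semaphore slot lost, stopping work")
	case <-time.After(10 * time.Second): // stand-in for real work
		log.Println("work finished while holding the slot")
	}
	return nil
}
```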
-func (s *Semaphore) Release() error { - // Hold the lock as we try to release - s.l.Lock() - defer s.l.Unlock() - - // Ensure the lock is actually held - if !s.isHeld { - return ErrSemaphoreNotHeld - } - - // Set that we no longer own the lock - s.isHeld = false - - // Stop the session renew - if s.sessionRenew != nil { - defer func() { - close(s.sessionRenew) - s.sessionRenew = nil - }() - } - - // Get and clear the lock session - lockSession := s.lockSession - s.lockSession = "" - - // Remove ourselves as a lock holder - kv := s.c.KV() - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) -READ: - pair, _, err := kv.Get(key, nil) - if err != nil { - return err - } - if pair == nil { - pair = &KVPair{} - } - lock, err := s.decodeLock(pair) - if err != nil { - return err - } - - // Create a new lock without us as a holder - if _, ok := lock.Holders[lockSession]; ok { - delete(lock.Holders, lockSession) - newLock, err := s.encodeLock(lock, pair.ModifyIndex) - if err != nil { - return err - } - - // Swap the locks - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - goto READ - } - } - - // Destroy the contender entry - contenderKey := path.Join(s.opts.Prefix, lockSession) - if _, err := kv.Delete(contenderKey, nil); err != nil { - return err - } - return nil -} - -// Destroy is used to cleanup the semaphore entry. It is not necessary -// to invoke. It will fail if the semaphore is in use. -func (s *Semaphore) Destroy() error { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return ErrSemaphoreHeld - } - - // List for the semaphore - kv := s.c.KV() - pairs, _, err := kv.List(s.opts.Prefix, nil) - if err != nil { - return fmt.Errorf("failed to read prefix: %v", err) - } - - // Find the lock pair, bail if it doesn't exist - lockPair := s.findLock(pairs) - if lockPair.ModifyIndex == 0 { - return nil - } - if lockPair.Flags != SemaphoreFlagValue { - return ErrSemaphoreConflict - } - - // Decode the lock - lock, err := s.decodeLock(lockPair) - if err != nil { - return err - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if there are any holders - if len(lock.Holders) > 0 { - return ErrSemaphoreInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(lockPair, nil) - if err != nil { - return fmt.Errorf("failed to remove semaphore: %v", err) - } - if !didRemove { - return ErrSemaphoreInUse - } - return nil -} - -// createSession is used to create a new managed session -func (s *Semaphore) createSession() (string, error) { - session := s.c.Session() - se := &SessionEntry{ - Name: s.opts.SessionName, - TTL: s.opts.SessionTTL, - Behavior: SessionBehaviorDelete, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// contenderEntry returns a formatted KVPair for the contender -func (s *Semaphore) contenderEntry(session string) *KVPair { - return &KVPair{ - Key: path.Join(s.opts.Prefix, session), - Value: s.opts.Value, - Session: session, - Flags: SemaphoreFlagValue, - } -} - -// findLock is used to find the KV Pair which is used for coordination -func (s *Semaphore) findLock(pairs KVPairs) *KVPair { - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) - for _, pair := range pairs { - if pair.Key == key { - return pair - } - } - return &KVPair{Flags: SemaphoreFlagValue} -} - -// decodeLock is used to decode a semaphoreLock from an 
-// entry in Consul -func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { - // Handle if there is no lock - if pair == nil || pair.Value == nil { - return &semaphoreLock{ - Limit: s.opts.Limit, - Holders: make(map[string]bool), - }, nil - } - - l := &semaphoreLock{} - if err := json.Unmarshal(pair.Value, l); err != nil { - return nil, fmt.Errorf("lock decoding failed: %v", err) - } - return l, nil -} - -// encodeLock is used to encode a semaphoreLock into a KVPair -// that can be PUT -func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { - enc, err := json.Marshal(l) - if err != nil { - return nil, fmt.Errorf("lock encoding failed: %v", err) - } - pair := &KVPair{ - Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), - Value: enc, - Flags: SemaphoreFlagValue, - ModifyIndex: oldIndex, - } - return pair, nil -} - -// pruneDeadHolders is used to remove all the dead lock holders -func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { - // Gather all the live holders - alive := make(map[string]struct{}, len(pairs)) - for _, pair := range pairs { - if pair.Session != "" { - alive[pair.Session] = struct{}{} - } - } - - // Remove any holders that are dead - for holder := range lock.Holders { - if _, ok := alive[holder]; !ok { - delete(lock.Holders, holder) - } - } -} - -// monitorLock is a long running routine to monitor a semaphore ownership -// It closes the stopCh if we lose our slot. -func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := s.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := s.opts.MonitorRetries -RETRY: - pairs, meta, err := kv.List(s.opts.Prefix, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(s.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - lockPair := s.findLock(pairs) - lock, err := s.decodeLock(lockPair) - if err != nil { - return - } - s.pruneDeadHolders(lock, pairs) - if _, ok := lock.Holders[session]; ok { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go deleted file mode 100644 index 1613f11a60..0000000000 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ /dev/null @@ -1,224 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "time" -) - -const ( - // SessionBehaviorRelease is the default behavior and causes - // all associated locks to be released on session invalidation. - SessionBehaviorRelease = "release" - - // SessionBehaviorDelete is new in Consul 0.5 and changes the - // behavior to delete all associated locks on session invalidation. - // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. 
- SessionBehaviorDelete = "delete" -) - -var ErrSessionExpired = errors.New("session expired") - -// SessionEntry represents a session in consul -type SessionEntry struct { - CreateIndex uint64 - ID string - Name string - Node string - Checks []string - LockDelay time.Duration - Behavior string - TTL string -} - -// Session can be used to query the Session endpoints -type Session struct { - c *Client -} - -// Session returns a handle to the session endpoints -func (c *Client) Session() *Session { - return &Session{c} -} - -// CreateNoChecks is like Create but is used specifically to create -// a session with no associated health checks. -func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - body := make(map[string]interface{}) - body["Checks"] = []string{} - if se != nil { - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(body, q) - -} - -// Create makes a new session. Providing a session entry can -// customize the session. It can also be nil to use defaults. -func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - var obj interface{} - if se != nil { - body := make(map[string]interface{}) - obj = body - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if len(se.Checks) > 0 { - body["Checks"] = se.Checks - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(obj, q) -} - -func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { - var out struct{ ID string } - wm, err := s.c.write("/v1/session/create", obj, &out, q) - if err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Destroy invalidates a given session -func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Renew renews the TTL on a given session -func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { - r := s.c.newRequest("PUT", "/v1/session/renew/"+id) - r.setWriteOptions(q) - rtt, resp, err := s.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - - if resp.StatusCode == 404 { - return nil, wm, nil - } else if resp.StatusCode != 200 { - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - - var entries []*SessionEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - if len(entries) > 0 { - return entries[0], wm, nil - } - return nil, wm, nil -} - -// RenewPeriodic is used to periodically invoke Session.Renew on a -// session until a doneCh is closed. This is meant to be used in a long running -// goroutine to ensure a session stays valid. 
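Editor's note: a sketch of the intended session lifecycle using the calls above together with RenewPeriodic, whose implementation follows below. The session name and TTL are illustrative:

```go
package main

import (
	consul "github.com/hashicorp/consul/api"
)

// withSession creates a TTL session, keeps it renewed in the background,
// and tears it down via doneCh once the caller's work is finished.
func withSession(client *consul.Client, work func(sessionID string)) error {
	session := client.Session()

	id, _, err := session.Create(&consul.SessionEntry{
		Name:     "my-app", // illustrative name
		TTL:      "15s",
		Behavior: consul.SessionBehaviorDelete, // delete held keys on invalidation
	}, nil)
	if err != nil {
		return err
	}

	doneCh := make(chan struct{})
	// RenewPeriodic renews at roughly TTL/2 and attempts a session destroy
	// once doneCh is closed.
	go session.RenewPeriodic("15s", id, nil, doneCh)
	defer close(doneCh)

	work(id)
	return nil
}
```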
-func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
-	ctx := q.Context()
-
-	ttl, err := time.ParseDuration(initialTTL)
-	if err != nil {
-		return err
-	}
-
-	waitDur := ttl / 2
-	lastRenewTime := time.Now()
-	var lastErr error
-	for {
-		if time.Since(lastRenewTime) > ttl {
-			return lastErr
-		}
-		select {
-		case <-time.After(waitDur):
-			entry, _, err := s.Renew(id, q)
-			if err != nil {
-				waitDur = time.Second
-				lastErr = err
-				continue
-			}
-			if entry == nil {
-				return ErrSessionExpired
-			}
-
-			// Handle the server updating the TTL
-			ttl, _ = time.ParseDuration(entry.TTL)
-			waitDur = ttl / 2
-			lastRenewTime = time.Now()
-
-		case <-doneCh:
-			// Attempt a session destroy
-			s.Destroy(id, q)
-			return nil
-
-		case <-ctx.Done():
-			// Bail immediately since attempting the destroy would
-			// use the canceled context in q, which would just bail.
-			return ctx.Err()
-		}
-	}
-}
-
-// Info looks up a single session
-func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
-	var entries []*SessionEntry
-	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	if len(entries) > 0 {
-		return entries[0], qm, nil
-	}
-	return nil, qm, nil
-}
-
-// Node gets sessions for a node
-func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
-	var entries []*SessionEntry
-	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// List gets all active sessions
-func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
-	var entries []*SessionEntry
-	qm, err := s.c.query("/v1/session/list", &entries, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
deleted file mode 100644
index e902377dd5..0000000000
--- a/vendor/github.com/hashicorp/consul/api/snapshot.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package api
-
-import (
-	"io"
-)
-
-// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
-// Consul's internal state and restore snapshots for disaster recovery.
-type Snapshot struct {
-	c *Client
-}
-
-// Snapshot returns a handle that exposes the snapshot endpoints.
-func (c *Client) Snapshot() *Snapshot {
-	return &Snapshot{c}
-}
-
-// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
-// data to save. If this doesn't return an error, then it's the responsibility
-// of the caller to close it. Only a subset of the QueryOptions are supported:
-// Datacenter, AllowStale, and Token.
-func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
-	r := s.c.newRequest("GET", "/v1/snapshot")
-	r.setQueryOptions(q)
-
-	rtt, resp, err := requireOK(s.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-	return resp.Body, qm, nil
-}
-
-// Restore streams in an existing snapshot and attempts to restore it.
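Editor's note: the snapshot endpoints pair naturally for backup and disaster recovery. A sketch of saving a snapshot to disk and streaming it back through Restore (defined directly below); the file path is illustrative, and a real tool would normally restore on a different cluster:

```go
package main

import (
	"io"
	"os"

	consul "github.com/hashicorp/consul/api"
)

// backupAndRestore saves a snapshot to the given path and feeds it straight
// back into Restore.
func backupAndRestore(client *consul.Client, path string) error {
	snap := client.Snapshot()

	// Save returns a stream; the caller is responsible for closing it.
	rc, _, err := snap.Save(nil)
	if err != nil {
		return err
	}
	defer rc.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, rc); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}

	in, err := os.Open(path)
	if err != nil {
		return err
	}
	defer in.Close()
	return snap.Restore(nil, in)
}
```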
-func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { - r := s.c.newRequest("PUT", "/v1/snapshot") - r.body = in - r.setWriteOptions(q) - _, _, err := requireOK(s.c.doRequest(r)) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go deleted file mode 100644 index 74ef61a678..0000000000 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ /dev/null @@ -1,43 +0,0 @@ -package api - -// Status can be used to query the Status endpoints -type Status struct { - c *Client -} - -// Status returns a handle to the status endpoints -func (c *Client) Status() *Status { - return &Status{c} -} - -// Leader is used to query for a known leader -func (s *Status) Leader() (string, error) { - r := s.c.newRequest("GET", "/v1/status/leader") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - var leader string - if err := decodeBody(resp, &leader); err != nil { - return "", err - } - return leader, nil -} - -// Peers is used to query for a known raft peers -func (s *Status) Peers() ([]string, error) { - r := s.c.newRequest("GET", "/v1/status/peers") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var peers []string - if err := decodeBody(resp, &peers); err != nil { - return nil, err - } - return peers, nil -} diff --git a/vendor/github.com/hashicorp/consul/website/LICENSE.md b/vendor/github.com/hashicorp/consul/website/LICENSE.md deleted file mode 100644 index 3189f43a65..0000000000 --- a/vendor/github.com/hashicorp/consul/website/LICENSE.md +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -The text contents of this website are MPL 2.0 licensed. - -The design contents of this website are proprietary and may not be reproduced -or reused in any way other than to run the website locally. The license for -the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml deleted file mode 100644 index 80e1de44e9..0000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.6 - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE deleted file mode 100644 index e87a115e46..0000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. 
that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary
      Licenses If You choose to distribute Source Code Form that is
      Incompatible With Secondary Licenses under the terms of this version of
      the License, the notice described in Exhibit B of this License must be
      attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-      This Source Code Form is "Incompatible
-      With Secondary Licenses", as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
deleted file mode 100644
index c3989e789f..0000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-TEST?=./...
-
-test:
-	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
-	go vet $(TEST)
-	go test $(TEST) -race
-
-.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
deleted file mode 100644
index f5abffc293..0000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# rootcerts
-
-Functions for loading root certificates for TLS connections.
-
------
-
-Go's standard library `crypto/tls` provides a common mechanism for configuring
-TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
-of certificates for the client to use as a trust store when verifying server
-certificates.
-
-This library contains utility functions for loading certificates destined for
-that field, as well as one other important thing:
-
-When the `RootCAs` field is `nil`, the standard library attempts to load the
-host's root CA set. This behavior is OS-specific, and the Darwin
-implementation contains [a bug that prevents trusted certificates from the
-System and Login keychains from being loaded][1]. This library contains
-Darwin-specific behavior that works around that bug.
-
-[1]: https://github.com/golang/go/issues/14514
-
-## Example Usage
-
-Here's a snippet demonstrating how this library is meant to be used:
-
-```go
-func httpClient() (*http.Client, error) {
-	tlsConfig := &tls.Config{}
-	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
-		CAFile: os.Getenv("MYAPP_CAFILE"),
-		CAPath: os.Getenv("MYAPP_CAPATH"),
-	})
-	if err != nil {
-		return nil, err
-	}
-	c := cleanhttp.DefaultClient()
-	t := cleanhttp.DefaultTransport()
-	t.TLSClientConfig = tlsConfig
-	c.Transport = t
-	return c, nil
-}
-```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
deleted file mode 100644
index b55cc62848..0000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Package rootcerts contains functions to aid in loading CA certificates for
-// TLS connections.
-//
-// In addition, its default behavior on Darwin works around an open issue [1]
-// in Go's crypto/x509 that prevents certificates from being loaded from the
-// System or Login keychains.
-//
-// [1] https://github.com/golang/go/issues/14514
-package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
deleted file mode 100644
index aeb30ece32..0000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package rootcerts
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-)
-
-// Config determines where LoadCACerts will load certificates from. When both
-// CAFile and CAPath are blank, this library's functions will either load
-// system roots explicitly and return them, or set the CertPool to nil to allow
-// Go's standard library to load system certs.
-type Config struct {
-	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
-	// precedence over CAPath.
-	CAFile string
-
-	// CAPath is a path to a directory populated with PEM-encoded certificates.
-	CAPath string
-}
-
-// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
-// Config specified.
-func ConfigureTLS(t *tls.Config, c *Config) error {
-	if t == nil {
-		return nil
-	}
-	pool, err := LoadCACerts(c)
-	if err != nil {
-		return err
-	}
-	t.RootCAs = pool
-	return nil
-}
-
-// LoadCACerts loads a CertPool based on the Config specified.
-func LoadCACerts(c *Config) (*x509.CertPool, error) {
-	if c == nil {
-		c = &Config{}
-	}
-	if c.CAFile != "" {
-		return LoadCAFile(c.CAFile)
-	}
-	if c.CAPath != "" {
-		return LoadCAPath(c.CAPath)
-	}
-
-	return LoadSystemCAs()
-}
-
-// LoadCAFile loads a single PEM-encoded file from the path specified.
-func LoadCAFile(caFile string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-
-	pem, err := ioutil.ReadFile(caFile)
-	if err != nil {
-		return nil, fmt.Errorf("Error loading CA File: %s", err)
-	}
-
-	ok := pool.AppendCertsFromPEM(pem)
-	if !ok {
-		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
-	}
-
-	return pool, nil
-}
-
-// LoadCAPath walks the provided path and loads all certificates encountered into
-// a pool.
-func LoadCAPath(caPath string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-	walkFn := func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		if info.IsDir() {
-			return nil
-		}
-
-		pem, err := ioutil.ReadFile(path)
-		if err != nil {
-			return fmt.Errorf("Error loading file from CAPath: %s", err)
-		}
-
-		ok := pool.AppendCertsFromPEM(pem)
-		if !ok {
-			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
-		}
-
-		return nil
-	}
-
-	err := filepath.Walk(caPath, walkFn)
-	if err != nil {
-		return nil, err
-	}
-
-	return pool, nil
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
deleted file mode 100644
index 66b1472c4a..0000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !darwin
-
-package rootcerts
-
-import "crypto/x509"
-
-// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
-// default behavior of standard TLS config libraries is triggered, which is to
-// load system certs.
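Editor's note: a self-contained equivalent of the README snippet above (its missing opening brace restored there), using only ConfigureTLS and a plain http.Transport instead of the go-cleanhttp helpers; the MYAPP_* environment variable names are the README's own illustrative placeholders:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"os"

	rootcerts "github.com/hashicorp/go-rootcerts"
)

// httpClient builds an HTTP client whose trust store is populated by
// rootcerts.ConfigureTLS (CAFile takes precedence over CAPath; with both
// unset, system roots are used, with the Darwin keychain workaround).
func httpClient() (*http.Client, error) {
	tlsConfig := &tls.Config{}
	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
		CAFile: os.Getenv("MYAPP_CAFILE"), // single PEM bundle
		CAPath: os.Getenv("MYAPP_CAPATH"), // directory of PEM files
	})
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
	}, nil
}

func main() {
	if _, err := httpClient(); err != nil {
		log.Fatal(err)
	}
}
```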
-func LoadSystemCAs() (*x509.CertPool, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go deleted file mode 100644 index a9a040657f..0000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go +++ /dev/null @@ -1,48 +0,0 @@ -package rootcerts - -import ( - "crypto/x509" - "os/exec" - "path" - - "github.com/mitchellh/go-homedir" -) - -// LoadSystemCAs has special behavior on Darwin systems to work around -func LoadSystemCAs() (*x509.CertPool, error) { - pool := x509.NewCertPool() - - for _, keychain := range certKeychains() { - err := addCertsFromKeychain(pool, keychain) - if err != nil { - return nil, err - } - } - - return pool, nil -} - -func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { - cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) - data, err := cmd.Output() - if err != nil { - return err - } - - pool.AppendCertsFromPEM(data) - - return nil -} - -func certKeychains() []string { - keychains := []string{ - "/System/Library/Keychains/SystemRootCertificates.keychain", - "/Library/Keychains/System.keychain", - } - home, err := homedir.Dir() - if err == nil { - loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") - keychains = append(keychains, loginKeychain) - } - return keychains -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem deleted file mode 120000 index dda0574d7f..0000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem +++ /dev/null @@ -1 +0,0 @@ -../capath/securetrust.pem \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem deleted file mode 120000 index 37ed4f01a4..0000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem +++ /dev/null @@ -1 +0,0 @@ -../capath/thawte.pem \ No newline at end of file diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE deleted file mode 100644 index c33dcc7c92..0000000000 --- a/vendor/github.com/hashicorp/serf/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. 
“Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. 
under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. 
Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. 
This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. 
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
-      This Source Code Form is “Incompatible
-      With Secondary Licenses”, as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go
deleted file mode 100644
index 613bfff89e..0000000000
--- a/vendor/github.com/hashicorp/serf/coordinate/client.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package coordinate
-
-import (
-	"fmt"
-	"math"
-	"sort"
-	"sync"
-	"time"
-)
-
-// Client manages the estimated network coordinate for a given node, and adjusts
-// it as the node observes round trip times and estimated coordinates from other
-// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
-// for more details.
-type Client struct {
-	// coord is the current estimate of the client's network coordinate.
-	coord *Coordinate
-
-	// origin is a coordinate sitting at the origin.
-	origin *Coordinate
-
-	// config contains the tuning parameters that govern the performance of
-	// the algorithm.
-	config *Config
-
-	// adjustmentIndex is the current index into the adjustmentSamples slice.
-	adjustmentIndex uint
-
-	// adjustment is used to store samples for the adjustment calculation.
-	adjustmentSamples []float64
-
-	// latencyFilterSamples is used to store the last several RTT samples,
-	// keyed by node name. We will use the config's LatencyFilterSize
-	// value to determine how many samples we keep, per node.
-	latencyFilterSamples map[string][]float64
-
-	// mutex enables safe concurrent access to the client.
-	mutex sync.RWMutex
-}
-
-// NewClient creates a new Client and verifies the configuration is valid.
-func NewClient(config *Config) (*Client, error) {
-	if !(config.Dimensionality > 0) {
-		return nil, fmt.Errorf("dimensionality must be >0")
-	}
-
-	return &Client{
-		coord:                NewCoordinate(config),
-		origin:               NewCoordinate(config),
-		config:               config,
-		adjustmentIndex:      0,
-		adjustmentSamples:    make([]float64, config.AdjustmentWindowSize),
-		latencyFilterSamples: make(map[string][]float64),
-	}, nil
-}
-
-// GetCoordinate returns a copy of the coordinate for this client.
-func (c *Client) GetCoordinate() *Coordinate {
-	c.mutex.RLock()
-	defer c.mutex.RUnlock()
-
-	return c.coord.Clone()
-}
-
-// SetCoordinate forces the client's coordinate to a known state.
-func (c *Client) SetCoordinate(coord *Coordinate) {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	c.coord = coord.Clone()
-}
-
-// ForgetNode removes any client state for the given node.
-func (c *Client) ForgetNode(node string) {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	delete(c.latencyFilterSamples, node)
-}
-
-// latencyFilter applies a simple moving median filter with a new sample for
-// a node. This assumes that the mutex has been locked already.
-func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
-	samples, ok := c.latencyFilterSamples[node]
-	if !ok {
-		samples = make([]float64, 0, c.config.LatencyFilterSize)
-	}
-
-	// Add the new sample and trim the list, if needed.
-	samples = append(samples, rttSeconds)
-	if len(samples) > int(c.config.LatencyFilterSize) {
-		samples = samples[1:]
-	}
-	c.latencyFilterSamples[node] = samples
-
-	// Sort a copy of the samples and return the median.
-	sorted := make([]float64, len(samples))
-	copy(sorted, samples)
-	sort.Float64s(sorted)
-	return sorted[len(sorted)/2]
-}
-
-// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
-// assumes that the mutex has been locked already.
-func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
-	const zeroThreshold = 1.0e-6
-
-	dist := c.coord.DistanceTo(other).Seconds()
-	if rttSeconds < zeroThreshold {
-		rttSeconds = zeroThreshold
-	}
-	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
-
-	totalError := c.coord.Error + other.Error
-	if totalError < zeroThreshold {
-		totalError = zeroThreshold
-	}
-	weight := c.coord.Error / totalError
-
-	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
-	if c.coord.Error > c.config.VivaldiErrorMax {
-		c.coord.Error = c.config.VivaldiErrorMax
-	}
-
-	delta := c.config.VivaldiCC * weight
-	force := delta * (rttSeconds - dist)
-	c.coord = c.coord.ApplyForce(c.config, force, other)
-}
-
-// updateAdjustment updates the adjustment portion of the client's coordinate, if
-// the feature is enabled. This assumes that the mutex has been locked already.
-func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
-	if c.config.AdjustmentWindowSize == 0 {
-		return
-	}
-
-	// Note that the existing adjustment factors don't figure in to this
-	// calculation so we use the raw distance here.
-	dist := c.coord.rawDistanceTo(other)
-	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
-	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
-
-	sum := 0.0
-	for _, sample := range c.adjustmentSamples {
-		sum += sample
-	}
-	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
-}
-
-// updateGravity applies a small amount of gravity to pull coordinates towards
-// the center of the coordinate system to combat drift. This assumes that the
-// mutex is locked already.
-func (c *Client) updateGravity() {
-	dist := c.origin.DistanceTo(c.coord).Seconds()
-	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
-	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
-}
-
-// Update takes other, a coordinate for another node, and rtt, a round trip
-// time observation for a ping to that node, and updates the estimated position of
-// the client's coordinate. Returns the updated coordinate.
-func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	rttSeconds := c.latencyFilter(node, rtt.Seconds())
-	c.updateVivaldi(other, rttSeconds)
-	c.updateAdjustment(other, rttSeconds)
-	c.updateGravity()
-	return c.coord.Clone()
-}
-
-// DistanceTo returns the estimated RTT from the client's coordinate to other, the
-// coordinate for another node.
-func (c *Client) DistanceTo(other *Coordinate) time.Duration {
-	c.mutex.RLock()
-	defer c.mutex.RUnlock()
-
-	return c.coord.DistanceTo(other)
-}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
deleted file mode 100644
index b85a8ab7b0..0000000000
--- a/vendor/github.com/hashicorp/serf/coordinate/config.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package coordinate
-
-// Config is used to set the parameters of the Vivaldi-based coordinate mapping
-// algorithm.
-//
-// The following references are called out at various points in the documentation
-// here:
-//
-// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
-//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
-// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
-//     in the Wild." NSDI. Vol. 7. 2007.
-// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
-//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
-//     on 18.1 (2010): 27-40.
-type Config struct {
-	// The dimensionality of the coordinate system. As discussed in [2], more
-	// dimensions improve the accuracy of the estimates up to a point. Per [2]
-	// we chose 8 dimensions plus a non-Euclidean height.
-	Dimensionality uint
-
-	// VivaldiErrorMax is the default error value when a node hasn't yet made
-	// any observations. It also serves as an upper limit on the error value in
-	// case observations cause the error value to increase without bound.
-	VivaldiErrorMax float64
-
-	// VivaldiCE is a tuning factor that controls the maximum impact an
-	// observation can have on a node's confidence. See [1] for more details.
-	VivaldiCE float64
-
-	// VivaldiCC is a tuning factor that controls the maximum impact an
-	// observation can have on a node's coordinate. See [1] for more details.
-	VivaldiCC float64
-
-	// AdjustmentWindowSize is a tuning factor that determines how many samples
-	// we retain to calculate the adjustment factor as discussed in [3]. Setting
-	// this to zero disables this feature.
-	AdjustmentWindowSize uint
-
-	// HeightMin is the minimum value of the height parameter. Since this
-	// always must be positive, it will introduce a small amount of error, so
-	// the chosen value should be relatively small compared to "normal"
-	// coordinates.
-	HeightMin float64
-
-	// LatencyFilterSize is the maximum number of samples that are retained
-	// per node, in order to compute a median. The intent is to ride out blips
-	// but still keep the delay low, since our time to probe any given node is
-	// pretty infrequent. See [2] for more details.
-	LatencyFilterSize uint
-
-	// GravityRho is a tuning factor that sets how much gravity has an effect
-	// to try to re-center coordinates. See [2] for more details.
-	GravityRho float64
-}
-
-// DefaultConfig returns a Config that has some default values suitable for
-// basic testing of the algorithm, but not tuned to any particular type of cluster.
-func DefaultConfig() *Config {
-	return &Config{
-		Dimensionality:       8,
-		VivaldiErrorMax:      1.5,
-		VivaldiCE:            0.25,
-		VivaldiCC:            0.25,
-		AdjustmentWindowSize: 20,
-		HeightMin:            10.0e-6,
-		LatencyFilterSize:    3,
-		GravityRho:           150.0,
-	}
-}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
deleted file mode 100644
index c9194e048b..0000000000
--- a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package coordinate
-
-import (
-	"math"
-	"math/rand"
-	"time"
-)
-
-// Coordinate is a specialized structure for holding network coordinates for the
-// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
-// to enable this to be serialized. All values in here are in units of seconds.
-type Coordinate struct {
-	// Vec is the Euclidean portion of the coordinate. This is used along
-	// with the other fields to provide an overall distance estimate. The
-	// units here are seconds.
-	Vec []float64
-
-	// Error reflects the confidence in the given coordinate and is updated
-	// dynamically by the Vivaldi Client. This is dimensionless.
-	Error float64
-
-	// Adjustment is a distance offset computed based on a calculation over
-	// observations from all other nodes over a fixed window and is updated
-	// dynamically by the Vivaldi Client. The units here are seconds.
-	Adjustment float64
-
-	// Height is a distance offset that accounts for non-Euclidean effects
-	// which model the access links from nodes to the core Internet. The access
-	// links are usually set by bandwidth and congestion, and the core links
-	// usually follow distance based on geography.
-	Height float64
-}
-
-const (
-	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
-	secondsToNanoseconds = 1.0e9
-
-	// zeroThreshold is used to decide if two coordinates are on top of each
-	// other.
-	zeroThreshold = 1.0e-6
-)
-
-// DimensionalityConflictError is the panic value used when you try to perform
-// operations with incompatible dimensions.
-type DimensionalityConflictError struct{}
-
-// Error implements the error interface.
-func (e DimensionalityConflictError) Error() string {
-	return "coordinate dimensionality does not match"
-}
-
-// NewCoordinate creates a new coordinate at the origin, using the given config
-// to supply key initial values.
-func NewCoordinate(config *Config) *Coordinate {
-	return &Coordinate{
-		Vec:        make([]float64, config.Dimensionality),
-		Error:      config.VivaldiErrorMax,
-		Adjustment: 0.0,
-		Height:     config.HeightMin,
-	}
-}
-
-// Clone creates an independent copy of this coordinate.
-func (c *Coordinate) Clone() *Coordinate {
-	vec := make([]float64, len(c.Vec))
-	copy(vec, c.Vec)
-	return &Coordinate{
-		Vec:        vec,
-		Error:      c.Error,
-		Adjustment: c.Adjustment,
-		Height:     c.Height,
-	}
-}
-
-// IsCompatibleWith checks to see if the two coordinates are compatible
-// dimensionally. If this returns true then you are guaranteed to not get
-// any runtime errors operating on them.
-func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
-	return len(c.Vec) == len(other.Vec)
-}
-
-// ApplyForce returns the result of applying the force from the direction of the
-// other coordinate.
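The deleted client.go, config.go, and coordinate.go above form a small self-contained API. A minimal sketch of one Vivaldi update round, assuming only the signatures shown in the removed files (the node name and RTT value are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	a, err := coordinate.NewClient(coordinate.DefaultConfig())
	if err != nil {
		panic(err)
	}
	b, _ := coordinate.NewClient(coordinate.DefaultConfig())

	// Feed client "a" one observation: a 10ms round trip to node "b".
	a.Update("b", b.GetCoordinate(), 10*time.Millisecond)

	// The estimate converges toward the observed RTT as samples accumulate.
	fmt.Println(a.DistanceTo(b.GetCoordinate()))
}
```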
-func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - ret := c.Clone() - unit, mag := unitVectorAt(c.Vec, other.Vec) - ret.Vec = add(ret.Vec, mul(unit, force)) - if mag > zeroThreshold { - ret.Height = (ret.Height+other.Height)*force/mag + ret.Height - ret.Height = math.Max(ret.Height, config.HeightMin) - } - return ret -} - -// DistanceTo returns the distance between this coordinate and the other -// coordinate, including adjustments. -func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - dist := c.rawDistanceTo(other) - adjustedDist := dist + c.Adjustment + other.Adjustment - if adjustedDist > 0.0 { - dist = adjustedDist - } - return time.Duration(dist * secondsToNanoseconds) -} - -// rawDistanceTo returns the Vivaldi distance between this coordinate and the -// other coordinate in seconds, not including adjustments. This assumes the -// dimensions have already been checked to be compatible. -func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { - return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height -} - -// add returns the sum of vec1 and vec2. This assumes the dimensions have -// already been checked to be compatible. -func add(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i, _ := range ret { - ret[i] = vec1[i] + vec2[i] - } - return ret -} - -// diff returns the difference between the vec1 and vec2. This assumes the -// dimensions have already been checked to be compatible. -func diff(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i, _ := range ret { - ret[i] = vec1[i] - vec2[i] - } - return ret -} - -// mul returns vec multiplied by a scalar factor. -func mul(vec []float64, factor float64) []float64 { - ret := make([]float64, len(vec)) - for i, _ := range vec { - ret[i] = vec[i] * factor - } - return ret -} - -// magnitude computes the magnitude of the vec. -func magnitude(vec []float64) float64 { - sum := 0.0 - for i, _ := range vec { - sum += vec[i] * vec[i] - } - return math.Sqrt(sum) -} - -// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two -// positions are the same then a random unit vector is returned. We also return -// the distance between the points for use in the later height calculation. -func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { - ret := diff(vec1, vec2) - - // If the coordinates aren't on top of each other we can normalize. - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), mag - } - - // Otherwise, just return a random unit vector. - for i, _ := range ret { - ret[i] = rand.Float64() - 0.5 - } - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), 0.0 - } - - // And finally just give up and make a unit vector along the first - // dimension. This should be exceedingly rare. 
-	ret = make([]float64, len(ret))
-	ret[0] = 1.0
-	return ret, 0.0
-}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
deleted file mode 100644
index 6fb033c0cd..0000000000
--- a/vendor/github.com/hashicorp/serf/coordinate/phantom.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package coordinate
-
-import (
-	"fmt"
-	"math"
-	"math/rand"
-	"time"
-)
-
-// GenerateClients returns a slice with nodes number of clients, all with the
-// given config.
-func GenerateClients(nodes int, config *Config) ([]*Client, error) {
-	clients := make([]*Client, nodes)
-	for i, _ := range clients {
-		client, err := NewClient(config)
-		if err != nil {
-			return nil, err
-		}
-
-		clients[i] = client
-	}
-	return clients, nil
-}
-
-// GenerateLine returns a truth matrix as if all the nodes are in a straight line
-// with the given spacing between them.
-func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			rtt := time.Duration(j-i) * spacing
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
-// grid with the given spacing between them.
-func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	n := int(math.Sqrt(float64(nodes)))
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			x1, y1 := float64(i%n), float64(i/n)
-			x2, y2 := float64(j%n), float64(j/n)
-			dx, dy := x2-x1, y2-y1
-			dist := math.Sqrt(dx*dx + dy*dy)
-			rtt := time.Duration(dist * float64(spacing))
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateSplit returns a truth matrix as if half the nodes are close together in
-// one location and half the nodes are close together in another. The lan factor
-// is used to separate the nodes locally and the wan factor represents the split
-// between the two sides.
-func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	split := nodes / 2
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			rtt := lan
-			if (i <= split && j > split) || (i > split && j <= split) {
-				rtt += wan
-			}
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
-// around a circle with the given radius. The first node is at the "center" of the
-// circle because it's equidistant from all the other nodes, but we place it at
-// double the radius, so it should show up above all the other nodes in height.
-func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - var rtt time.Duration - if i == 0 { - rtt = 2 * radius - } else { - t1 := 2.0 * math.Pi * float64(i) / float64(nodes) - x1, y1 := math.Cos(t1), math.Sin(t1) - t2 := 2.0 * math.Pi * float64(j) / float64(nodes) - x2, y2 := math.Cos(t2), math.Sin(t2) - dx, dy := x2-x1, y2-y1 - dist := math.Sqrt(dx*dx + dy*dy) - rtt = time.Duration(dist * float64(radius)) - } - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateRandom returns a truth matrix for a set of nodes with normally -// distributed delays, with the given mean and deviation. The RNG is re-seeded -// so you always get the same matrix for a given size. -func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { - rand.Seed(1) - - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() - rtt := time.Duration(rttSeconds * secondsToNanoseconds) - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// Simulate runs the given number of cycles using the given list of clients and -// truth matrix. On each cycle, each client will pick a random node and observe -// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for -// each simulation run to get deterministic results (for this algorithm and the -// underlying algorithm which will use random numbers for position vectors when -// starting out with everything at the origin). -func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { - rand.Seed(1) - - nodes := len(clients) - for cycle := 0; cycle < cycles; cycle++ { - for i, _ := range clients { - if j := rand.Intn(nodes); j != i { - c := clients[j].GetCoordinate() - rtt := truth[i][j] - node := fmt.Sprintf("node_%d", j) - clients[i].Update(node, c, rtt) - } - } - } -} - -// Stats is returned from the Evaluate function with a summary of the algorithm -// performance. -type Stats struct { - ErrorMax float64 - ErrorAvg float64 -} - -// Evaluate uses the coordinates of the given clients to calculate estimated -// distances and compares them with the given truth matrix, returning summary -// stats. 
-func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { - nodes := len(clients) - count := 0 - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() - actual := truth[i][j].Seconds() - error := math.Abs(est-actual) / actual - stats.ErrorMax = math.Max(stats.ErrorMax, error) - stats.ErrorAvg += error - count += 1 - } - } - - stats.ErrorAvg /= float64(count) - fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) - return -} diff --git a/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright deleted file mode 100644 index 21a1a1b532..0000000000 --- a/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright +++ /dev/null @@ -1,2 +0,0 @@ -Name: serf -Copyright: Hashicorp 2013 diff --git a/vendor/github.com/hashicorp/serf/website/source/LICENSE b/vendor/github.com/hashicorp/serf/website/source/LICENSE deleted file mode 100644 index 36c29d7f7b..0000000000 --- a/vendor/github.com/hashicorp/serf/website/source/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -* The text contents of this website are MPL 2.0 licensed. - -* The design contents of this website are proprietary and may not be reproduced - or reused in any way other than to run the Serf website locally. The license - for the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/howeyc/crc16/.travis.yml b/vendor/github.com/howeyc/crc16/.travis.yml deleted file mode 100644 index 4f2ee4d973..0000000000 --- a/vendor/github.com/howeyc/crc16/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/howeyc/crc16/LICENSE b/vendor/github.com/howeyc/crc16/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/github.com/howeyc/crc16/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
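The phantom.go helpers removed above (GenerateClients, GenerateLine, Simulate, Evaluate) make up the package's test harness. A minimal end-to-end sketch under the signatures shown there; node count, spacing, and cycle count are illustrative:

```go
package main

import (
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()
	clients, err := coordinate.GenerateClients(10, config)
	if err != nil {
		panic(err)
	}

	// Ten nodes in a line, 10ms apart; run 1000 update cycles, then print
	// the average and maximum estimation error against the truth matrix.
	truth := coordinate.GenerateLine(10, 10*time.Millisecond)
	coordinate.Simulate(clients, truth, 1000)
	coordinate.Evaluate(clients, truth)
}
```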
diff --git a/vendor/github.com/howeyc/crc16/README.md b/vendor/github.com/howeyc/crc16/README.md
deleted file mode 100644
index bd4bf0f3e1..0000000000
--- a/vendor/github.com/howeyc/crc16/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/howeyc/crc16?status.svg)](https://godoc.org/github.com/howeyc/crc16) [![Build Status](https://secure.travis-ci.org/howeyc/crc16.png?branch=master)](http://travis-ci.org/howeyc/crc16)
-
-# CRC16
-A Go package implementing the 16-bit Cyclic Redundancy Check, or CRC-16, checksum.
-
-## Usage
-To generate the hash of a byte slice, use the [`crc16.Checksum()`](https://godoc.org/github.com/howeyc/crc16#Checksum) function:
-```golang
-import "github.com/howeyc/crc16"
-
-data := []byte("test")
-checksum := crc16.Checksum(data, crc16.IBMTable)
-```
-
-The package provides [the following](https://godoc.org/github.com/howeyc/crc16#pkg-variables) hashing tables. For each of these tables, a shorthand can be used.
-```golang
-// This is the same as crc16.Checksum(data, crc16.IBMTable)
-checksum := crc16.ChecksumIBM(data)
-```
-
-Using the [hash.Hash](https://godoc.org/hash#Hash) interface also works.
-```go
-h := crc16.New(crc16.IBMTable)
-data := []byte("test")
-data2 := []byte("data")
-h.Write(data)
-h.Write(data2)
-checksum := h.Sum(nil)
-```
-
-## Changelog
-* 2017.03.27 - Added MBus checksum
-* 2017.05.27 - Added checksum function without XOR
-* 2017.12.08 - Implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to allow saving and recreating their internal state.
diff --git a/vendor/github.com/howeyc/crc16/crc16.go b/vendor/github.com/howeyc/crc16/crc16.go
deleted file mode 100644
index 917a3a3a98..0000000000
--- a/vendor/github.com/howeyc/crc16/crc16.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package crc16
-
-// Predefined polynomials.
-const (
-	// IBM is used by Bisync, Modbus, USB, ANSI X3.28, SIA DC-07, ...
-	IBM = 0xA001
-
-	// CCITT is used by X.25, V.41, HDLC FCS, XMODEM, Bluetooth, PACTOR, SD, ...
-	// CCITT forward is 0x8408. Reverse is 0x1021.
-	CCITT      = 0x8408
-	CCITTFalse = 0x1021
-
-	// SCSI is used by SCSI
-	SCSI = 0xEDD1
-
-	// MBUS is used by Meter-Bus, DNP, ...
-	MBUS = 0x3D65
-)
-
-// Table is a 256-word table representing the polynomial for efficient processing.
-type Table struct {
-	entries  [256]uint16
-	reversed bool
-	noXOR    bool
-}
-
-// IBMTable is the table for the IBM polynomial.
-var IBMTable = makeTable(IBM)
-
-// CCITTTable is the table for the CCITT polynomial.
-var CCITTTable = makeTable(CCITT)
-
-// CCITTFalseTable is the table for CCITT-FALSE.
-var CCITTFalseTable = makeBitsReversedTable(CCITTFalse)
-
-// SCSITable is the table for the SCSI polynomial.
-var SCSITable = makeTable(SCSI)
-
-// MBusTable is the table used for Meter-Bus polynomial.
-var MBusTable = makeBitsReversedTable(MBUS)
-
-// MakeTable returns the Table constructed from the specified polynomial.
-func MakeTable(poly uint16) *Table {
-	return makeTable(poly)
-}
-
-// MakeBitsReversedTable returns the Table constructed from the specified polynomial.
-func MakeBitsReversedTable(poly uint16) *Table {
-	return makeBitsReversedTable(poly)
-}
-
-// MakeTableNoXOR returns the Table constructed from the specified polynomial.
-// Updates happen without XOR in and XOR out.
-func MakeTableNoXOR(poly uint16) *Table {
-	tab := makeTable(poly)
-	tab.noXOR = true
-	return tab
-}
-
-// makeBitsReversedTable returns the Table constructed from the specified polynomial.
-func makeBitsReversedTable(poly uint16) *Table {
-	t := &Table{
-		reversed: true,
-	}
-	width := uint16(16)
-	for i := uint16(0); i < 256; i++ {
-		crc := i << (width - 8)
-		for j := 0; j < 8; j++ {
-			if crc&(1<<(width-1)) != 0 {
-				crc = (crc << 1) ^ poly
-			} else {
-				crc <<= 1
-			}
-		}
-		t.entries[i] = crc
-	}
-	return t
-}
-
-func makeTable(poly uint16) *Table {
-	t := &Table{
-		reversed: false,
-	}
-	for i := 0; i < 256; i++ {
-		crc := uint16(i)
-		for j := 0; j < 8; j++ {
-			if crc&1 == 1 {
-				crc = (crc >> 1) ^ poly
-			} else {
-				crc >>= 1
-			}
-		}
-		t.entries[i] = crc
-	}
-	return t
-}
-
-func updateBitsReversed(crc uint16, tab *Table, p []byte) uint16 {
-	for _, v := range p {
-		crc = tab.entries[byte(crc>>8)^v] ^ (crc << 8)
-	}
-	return crc
-}
-
-func update(crc uint16, tab *Table, p []byte) uint16 {
-	crc = ^crc
-
-	for _, v := range p {
-		crc = tab.entries[byte(crc)^v] ^ (crc >> 8)
-	}
-
-	return ^crc
-}
-
-func updateNoXOR(crc uint16, tab *Table, p []byte) uint16 {
-	for _, v := range p {
-		crc = tab.entries[byte(crc)^v] ^ (crc >> 8)
-	}
-
-	return crc
-}
-
-// Update returns the result of adding the bytes in p to the crc.
-func Update(crc uint16, tab *Table, p []byte) uint16 {
-	if tab.reversed {
-		return updateBitsReversed(crc, tab, p)
-	} else if tab.noXOR {
-		return updateNoXOR(crc, tab, p)
-	} else {
-		return update(crc, tab, p)
-	}
-}
-
-// Checksum returns the CRC-16 checksum of data
-// using the polynomial represented by the Table.
-func Checksum(data []byte, tab *Table) uint16 { return Update(0, tab, data) }
-
-// ChecksumIBM returns the CRC-16 checksum of data
-// using the IBM polynomial.
-func ChecksumIBM(data []byte) uint16 { return Update(0, IBMTable, data) }
-
-// ChecksumCCITTFalse returns the CRC-16 checksum using
-// what some call the CCITT-False polynomial, which matches what is used
-// by Perl Digest/CRC and Boost for example.
-func ChecksumCCITTFalse(data []byte) uint16 { return Update(0xffff, CCITTFalseTable, data) }
-
-// ChecksumCCITT returns the CRC-16 checksum of data
-// using the CCITT polynomial.
-func ChecksumCCITT(data []byte) uint16 { return Update(0, CCITTTable, data) }
-
-// ChecksumSCSI returns the CRC-16 checksum of data
-// using the SCSI polynomial.
-func ChecksumSCSI(data []byte) uint16 { return Update(0, SCSITable, data) }
-
-// ChecksumMBus returns the CRC-16 checksum of data
-// using the MBus polynomial.
-func ChecksumMBus(data []byte) uint16 { return Update(0, MBusTable, data) }
diff --git a/vendor/github.com/howeyc/crc16/hash.go b/vendor/github.com/howeyc/crc16/hash.go
deleted file mode 100644
index 1d10a4eeb1..0000000000
--- a/vendor/github.com/howeyc/crc16/hash.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package crc16
-
-import (
-	"errors"
-	"hash"
-)
-
-// Hash16 is the common interface implemented by all 16-bit hash functions.
-type Hash16 interface {
-	hash.Hash
-	Sum16() uint16
-}
-
-// New creates a new Hash16 computing the CRC-16 checksum
-// using the polynomial represented by the Table.
-func New(tab *Table) Hash16 { return &digest{0, tab} }
-
-// NewIBM creates a new Hash16 computing the CRC-16 checksum
-// using the IBM polynomial.
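A minimal sketch of the removed crc16 package in use, assuming only the predefined tables, constants, and constructors shown above; the input bytes are illustrative:

```go
package main

import (
	"fmt"

	"github.com/howeyc/crc16"
)

func main() {
	data := []byte("123456789")

	// Shorthand for the predefined IBM table.
	fmt.Printf("%#04x\n", crc16.ChecksumIBM(data))

	// Equivalent checksum through an explicitly constructed table.
	tab := crc16.MakeTable(crc16.IBM)
	fmt.Printf("%#04x\n", crc16.Checksum(data, tab))
}
```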
-func NewIBM() Hash16 { return New(IBMTable) } - -// NewCCITT creates a new hash.Hash16 computing the CRC-16 checksum -// using the CCITT polynomial. -func NewCCITT() Hash16 { return New(CCITTTable) } - -// NewSCSI creates a new Hash16 computing the CRC-16 checksum -// using the SCSI polynomial. -func NewSCSI() Hash16 { return New(SCSITable) } - -// digest represents the partial evaluation of a checksum. -type digest struct { - crc uint16 - tab *Table -} - -func (d *digest) Size() int { return 2 } - -func (d *digest) BlockSize() int { return 1 } - -func (d *digest) Reset() { d.crc = 0 } - -func (d *digest) Write(p []byte) (n int, err error) { - d.crc = Update(d.crc, d.tab, p) - return len(p), nil -} - -func (d *digest) Sum16() uint16 { return d.crc } - -func (d *digest) Sum(in []byte) []byte { - s := d.Sum16() - return append(in, byte(s>>8), byte(s)) -} - -const ( - magic = "crc16\x01" - marshaledSize = len(magic) + 2 + 2 + 1 -) - -func (d *digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint16(b, tableSum(d.tab)) - b = appendUint16(b, d.crc) - if d.tab.reversed { - b = append(b, byte(0x01)) - } else { - b = append(b, byte(0x00)) - } - return b, nil -} - -func (d *digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("hash/crc16: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("hash/crc16: invalid hash state size") - } - if tableSum(d.tab) != readUint16(b[6:]) { - return errors.New("hash/crc16: tables do not match") - } - d.crc = readUint16(b[8:]) - if b[10] == 0x01 { - d.tab.reversed = true - } - return nil -} - -func appendUint16(b []byte, x uint16) []byte { - a := [2]byte{ - byte(x >> 8), - byte(x), - } - return append(b, a[:]...) -} - -func readUint16(b []byte) uint16 { - _ = b[1] - return uint16(b[1]) | uint16(b[0])<<8 -} - -// tableSum returns the IBM checksum of table t. -func tableSum(t *Table) uint16 { - var a [1024]byte - b := a[:0] - if t != nil { - for _, x := range t.entries { - b = appendUint16(b, x) - } - } - return ChecksumIBM(b) -} diff --git a/vendor/github.com/kr/pretty/.gitignore b/vendor/github.com/kr/pretty/.gitignore deleted file mode 100644 index 1f0a99f2f2..0000000000 --- a/vendor/github.com/kr/pretty/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -[568].out -_go* -_test* -_obj diff --git a/vendor/github.com/kr/pretty/License b/vendor/github.com/kr/pretty/License deleted file mode 100644 index 05c783ccf6..0000000000 --- a/vendor/github.com/kr/pretty/License +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/kr/pretty/Readme b/vendor/github.com/kr/pretty/Readme
deleted file mode 100644
index c589fc622b..0000000000
--- a/vendor/github.com/kr/pretty/Readme
+++ /dev/null
@@ -1,9 +0,0 @@
-package pretty
-
-    import "github.com/kr/pretty"
-
-    Package pretty provides pretty-printing for Go values.
-
-Documentation
-
-    http://godoc.org/github.com/kr/pretty
diff --git a/vendor/github.com/kr/pretty/diff.go b/vendor/github.com/kr/pretty/diff.go
deleted file mode 100644
index 6aa7f743a2..0000000000
--- a/vendor/github.com/kr/pretty/diff.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package pretty
-
-import (
-	"fmt"
-	"io"
-	"reflect"
-)
-
-type sbuf []string
-
-func (p *sbuf) Printf(format string, a ...interface{}) {
-	s := fmt.Sprintf(format, a...)
-	*p = append(*p, s)
-}
-
-// Diff returns a slice where each element describes
-// a difference between a and b.
-func Diff(a, b interface{}) (desc []string) {
-	Pdiff((*sbuf)(&desc), a, b)
-	return desc
-}
-
-// wprintfer calls Fprintf on w for each Printf call
-// with a trailing newline.
-type wprintfer struct{ w io.Writer }
-
-func (p *wprintfer) Printf(format string, a ...interface{}) {
-	fmt.Fprintf(p.w, format+"\n", a...)
-}
-
-// Fdiff writes to w a description of the differences between a and b.
-func Fdiff(w io.Writer, a, b interface{}) {
-	Pdiff(&wprintfer{w}, a, b)
-}
-
-type Printfer interface {
-	Printf(format string, a ...interface{})
-}
-
-// Pdiff prints to p a description of the differences between a and b.
-// It calls Printf once for each difference, with no trailing newline.
-// The standard library log.Logger is a Printfer.
-func Pdiff(p Printfer, a, b interface{}) {
-	diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
-}
-
-type Logfer interface {
-	Logf(format string, a ...interface{})
-}
-
-// logprintfer calls Logf on the wrapped Logfer for each Printf call.
-type logprintfer struct{ l Logfer }
-
-func (p *logprintfer) Printf(format string, a ...interface{}) {
-	p.l.Logf(format, a...)
-}
-
-// Ldiff prints to l a description of the differences between a and b.
-// It calls Logf once for each difference, with no trailing newline.
-// The standard library testing.T and testing.B are Logfers.
-func Ldiff(l Logfer, a, b interface{}) {
-	Pdiff(&logprintfer{l}, a, b)
-}
-
-type diffPrinter struct {
-	w Printfer
-	l string // label
-}
-
-func (w diffPrinter) printf(f string, a ...interface{}) {
-	var l string
-	if w.l != "" {
-		l = w.l + ": "
-	}
-	w.w.Printf(l+f, a...)
-} - -func (w diffPrinter) diff(av, bv reflect.Value) { - if !av.IsValid() && bv.IsValid() { - w.printf("nil != %# v", formatter{v: bv, quote: true}) - return - } - if av.IsValid() && !bv.IsValid() { - w.printf("%# v != nil", formatter{v: av, quote: true}) - return - } - if !av.IsValid() && !bv.IsValid() { - return - } - - at := av.Type() - bt := bv.Type() - if at != bt { - w.printf("%v != %v", at, bt) - return - } - - switch kind := at.Kind(); kind { - case reflect.Bool: - if a, b := av.Bool(), bv.Bool(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if a, b := av.Int(), bv.Int(); a != b { - w.printf("%d != %d", a, b) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if a, b := av.Uint(), bv.Uint(); a != b { - w.printf("%d != %d", a, b) - } - case reflect.Float32, reflect.Float64: - if a, b := av.Float(), bv.Float(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Complex64, reflect.Complex128: - if a, b := av.Complex(), bv.Complex(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Array: - n := av.Len() - for i := 0; i < n; i++ { - w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) - } - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - if a, b := av.Pointer(), bv.Pointer(); a != b { - w.printf("%#x != %#x", a, b) - } - case reflect.Interface: - w.diff(av.Elem(), bv.Elem()) - case reflect.Map: - ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) - for _, k := range ak { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.printf("%q != (missing)", av.MapIndex(k)) - } - for _, k := range both { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.diff(av.MapIndex(k), bv.MapIndex(k)) - } - for _, k := range bk { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.printf("(missing) != %q", bv.MapIndex(k)) - } - case reflect.Ptr: - switch { - case av.IsNil() && !bv.IsNil(): - w.printf("nil != %# v", formatter{v: bv, quote: true}) - case !av.IsNil() && bv.IsNil(): - w.printf("%# v != nil", formatter{v: av, quote: true}) - case !av.IsNil() && !bv.IsNil(): - w.diff(av.Elem(), bv.Elem()) - } - case reflect.Slice: - lenA := av.Len() - lenB := bv.Len() - if lenA != lenB { - w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) - break - } - for i := 0; i < lenA; i++ { - w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) - } - case reflect.String: - if a, b := av.String(), bv.String(); a != b { - w.printf("%q != %q", a, b) - } - case reflect.Struct: - for i := 0; i < av.NumField(); i++ { - w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) - } - default: - panic("unknown reflect Kind: " + kind.String()) - } -} - -func (d diffPrinter) relabel(name string) (d1 diffPrinter) { - d1 = d - if d.l != "" && name[0] != '[' { - d1.l += "." - } - d1.l += name - return d1 -} - -// keyEqual compares a and b for equality. -// Both a and b must be valid map keys. 
-func keyEqual(av, bv reflect.Value) bool { - if !av.IsValid() && !bv.IsValid() { - return true - } - if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { - return false - } - switch kind := av.Kind(); kind { - case reflect.Bool: - a, b := av.Bool(), bv.Bool() - return a == b - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - a, b := av.Int(), bv.Int() - return a == b - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - a, b := av.Uint(), bv.Uint() - return a == b - case reflect.Float32, reflect.Float64: - a, b := av.Float(), bv.Float() - return a == b - case reflect.Complex64, reflect.Complex128: - a, b := av.Complex(), bv.Complex() - return a == b - case reflect.Array: - for i := 0; i < av.Len(); i++ { - if !keyEqual(av.Index(i), bv.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: - a, b := av.Pointer(), bv.Pointer() - return a == b - case reflect.Interface: - return keyEqual(av.Elem(), bv.Elem()) - case reflect.String: - a, b := av.String(), bv.String() - return a == b - case reflect.Struct: - for i := 0; i < av.NumField(); i++ { - if !keyEqual(av.Field(i), bv.Field(i)) { - return false - } - } - return true - default: - panic("invalid map key type " + av.Type().String()) - } -} - -func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { - for _, av := range a { - inBoth := false - for _, bv := range b { - if keyEqual(av, bv) { - inBoth = true - both = append(both, av) - break - } - } - if !inBoth { - ak = append(ak, av) - } - } - for _, bv := range b { - inBoth := false - for _, av := range a { - if keyEqual(av, bv) { - inBoth = true - break - } - } - if !inBoth { - bk = append(bk, bv) - } - } - return -} diff --git a/vendor/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go deleted file mode 100644 index a317d7b8ee..0000000000 --- a/vendor/github.com/kr/pretty/formatter.go +++ /dev/null @@ -1,328 +0,0 @@ -package pretty - -import ( - "fmt" - "io" - "reflect" - "strconv" - "text/tabwriter" - - "github.com/kr/text" -) - -type formatter struct { - v reflect.Value - force bool - quote bool -} - -// Formatter makes a wrapper, f, that will format x as go source with line -// breaks and tabs. Object f responds to the "%v" formatting verb when both the -// "#" and " " (space) flags are set, for example: -// -// fmt.Sprintf("%# v", Formatter(x)) -// -// If one of these two flags is not set, or any other verb is used, f will -// format x according to the usual rules of package fmt. -// In particular, if x satisfies fmt.Formatter, then x.Format will be called. 
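The removed kr/pretty package pairs the Diff family above with the Formatter defined below; a minimal sketch assuming only the signatures shown in the deleted files (the struct type and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/kr/pretty"
)

type endpoint struct {
	Name string
	Port int
}

func main() {
	a := endpoint{Name: "vpp", Port: 5002}
	b := endpoint{Name: "vpp", Port: 5003}

	// Diff reports one string per differing field, labeled by its path,
	// e.g. "Port: 5002 != 5003".
	for _, d := range pretty.Diff(a, b) {
		fmt.Println(d)
	}

	// Formatter pretty-prints a value when used with the "%# v" verb.
	fmt.Printf("%# v\n", pretty.Formatter(a))
}
```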
-func Formatter(x interface{}) (f fmt.Formatter) { - return formatter{v: reflect.ValueOf(x), quote: true} -} - -func (fo formatter) String() string { - return fmt.Sprint(fo.v.Interface()) // unwrap it -} - -func (fo formatter) passThrough(f fmt.State, c rune) { - s := "%" - for i := 0; i < 128; i++ { - if f.Flag(i) { - s += string(i) - } - } - if w, ok := f.Width(); ok { - s += fmt.Sprintf("%d", w) - } - if p, ok := f.Precision(); ok { - s += fmt.Sprintf(".%d", p) - } - s += string(c) - fmt.Fprintf(f, s, fo.v.Interface()) -} - -func (fo formatter) Format(f fmt.State, c rune) { - if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { - w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) - p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} - p.printValue(fo.v, true, fo.quote) - w.Flush() - return - } - fo.passThrough(f, c) -} - -type printer struct { - io.Writer - tw *tabwriter.Writer - visited map[visit]int - depth int -} - -func (p *printer) indent() *printer { - q := *p - q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) - q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) - return &q -} - -func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { - if showType { - io.WriteString(p, v.Type().String()) - fmt.Fprintf(p, "(%#v)", x) - } else { - fmt.Fprintf(p, "%#v", x) - } -} - -// printValue must keep track of already-printed pointer values to avoid -// infinite recursion. -type visit struct { - v uintptr - typ reflect.Type -} - -func (p *printer) printValue(v reflect.Value, showType, quote bool) { - if p.depth > 10 { - io.WriteString(p, "!%v(DEPTH EXCEEDED)") - return - } - - switch v.Kind() { - case reflect.Bool: - p.printInline(v, v.Bool(), showType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p.printInline(v, v.Int(), showType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p.printInline(v, v.Uint(), showType) - case reflect.Float32, reflect.Float64: - p.printInline(v, v.Float(), showType) - case reflect.Complex64, reflect.Complex128: - fmt.Fprintf(p, "%#v", v.Complex()) - case reflect.String: - p.fmtString(v.String(), quote) - case reflect.Map: - t := v.Type() - if showType { - io.WriteString(p, t.String()) - } - writeByte(p, '{') - if nonzero(v) { - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - keys := v.MapKeys() - for i := 0; i < v.Len(); i++ { - showTypeInStruct := true - k := keys[i] - mv := v.MapIndex(k) - pp.printValue(k, false, true) - writeByte(pp, ':') - if expand { - writeByte(pp, '\t') - } - showTypeInStruct = t.Elem().Kind() == reflect.Interface - pp.printValue(mv, showTypeInStruct, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.Len()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - } - writeByte(p, '}') - case reflect.Struct: - t := v.Type() - if v.CanAddr() { - addr := v.UnsafeAddr() - vis := visit{addr, t} - if vd, ok := p.visited[vis]; ok && vd < p.depth { - p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) - break // don't print v again - } - p.visited[vis] = p.depth - } - - if showType { - io.WriteString(p, t.String()) - } - writeByte(p, '{') - if nonzero(v) { - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - for i := 0; i < v.NumField(); i++ { - showTypeInStruct := true - if f := t.Field(i); f.Name != "" { - io.WriteString(pp, f.Name) - writeByte(pp, ':') - if expand { - writeByte(pp, '\t') 
- } - showTypeInStruct = labelType(f.Type) - } - pp.printValue(getField(v, i), showTypeInStruct, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.NumField()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - } - writeByte(p, '}') - case reflect.Interface: - switch e := v.Elem(); { - case e.Kind() == reflect.Invalid: - io.WriteString(p, "nil") - case e.IsValid(): - pp := *p - pp.depth++ - pp.printValue(e, showType, true) - default: - io.WriteString(p, v.Type().String()) - io.WriteString(p, "(nil)") - } - case reflect.Array, reflect.Slice: - t := v.Type() - if showType { - io.WriteString(p, t.String()) - } - if v.Kind() == reflect.Slice && v.IsNil() && showType { - io.WriteString(p, "(nil)") - break - } - if v.Kind() == reflect.Slice && v.IsNil() { - io.WriteString(p, "nil") - break - } - writeByte(p, '{') - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - for i := 0; i < v.Len(); i++ { - showTypeInSlice := t.Elem().Kind() == reflect.Interface - pp.printValue(v.Index(i), showTypeInSlice, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.Len()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - writeByte(p, '}') - case reflect.Ptr: - e := v.Elem() - if !e.IsValid() { - writeByte(p, '(') - io.WriteString(p, v.Type().String()) - io.WriteString(p, ")(nil)") - } else { - pp := *p - pp.depth++ - writeByte(pp, '&') - pp.printValue(e, true, true) - } - case reflect.Chan: - x := v.Pointer() - if showType { - writeByte(p, '(') - io.WriteString(p, v.Type().String()) - fmt.Fprintf(p, ")(%#v)", x) - } else { - fmt.Fprintf(p, "%#v", x) - } - case reflect.Func: - io.WriteString(p, v.Type().String()) - io.WriteString(p, " {...}") - case reflect.UnsafePointer: - p.printInline(v, v.Pointer(), showType) - case reflect.Invalid: - io.WriteString(p, "nil") - } -} - -func canInline(t reflect.Type) bool { - switch t.Kind() { - case reflect.Map: - return !canExpand(t.Elem()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if canExpand(t.Field(i).Type) { - return false - } - } - return true - case reflect.Interface: - return false - case reflect.Array, reflect.Slice: - return !canExpand(t.Elem()) - case reflect.Ptr: - return false - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - return false - } - return true -} - -func canExpand(t reflect.Type) bool { - switch t.Kind() { - case reflect.Map, reflect.Struct, - reflect.Interface, reflect.Array, reflect.Slice, - reflect.Ptr: - return true - } - return false -} - -func labelType(t reflect.Type) bool { - switch t.Kind() { - case reflect.Interface, reflect.Struct: - return true - } - return false -} - -func (p *printer) fmtString(s string, quote bool) { - if quote { - s = strconv.Quote(s) - } - io.WriteString(p, s) -} - -func writeByte(w io.Writer, b byte) { - w.Write([]byte{b}) -} - -func getField(v reflect.Value, i int) reflect.Value { - val := v.Field(i) - if val.Kind() == reflect.Interface && !val.IsNil() { - val = val.Elem() - } - return val -} diff --git a/vendor/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go deleted file mode 100644 index 49423ec7f5..0000000000 --- a/vendor/github.com/kr/pretty/pretty.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package pretty provides pretty-printing for Go values. This is -// useful during debugging, to avoid wrapping long output lines in -// the terminal. 
-// -// It provides a function, Formatter, that can be used with any -// function that accepts a format string. It also provides -// convenience wrappers for functions in packages fmt and log. -package pretty - -import ( - "fmt" - "io" - "log" - "reflect" -) - -// Errorf is a convenience wrapper for fmt.Errorf. -// -// Calling Errorf(f, x, y) is equivalent to -// fmt.Errorf(f, Formatter(x), Formatter(y)). -func Errorf(format string, a ...interface{}) error { - return fmt.Errorf(format, wrap(a, false)...) -} - -// Fprintf is a convenience wrapper for fmt.Fprintf. -// -// Calling Fprintf(w, f, x, y) is equivalent to -// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { - return fmt.Fprintf(w, format, wrap(a, false)...) -} - -// Log is a convenience wrapper for log.Printf. -// -// Calling Log(x, y) is equivalent to -// log.Print(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Log(a ...interface{}) { - log.Print(wrap(a, true)...) -} - -// Logf is a convenience wrapper for log.Printf. -// -// Calling Logf(f, x, y) is equivalent to -// log.Printf(f, Formatter(x), Formatter(y)). -func Logf(format string, a ...interface{}) { - log.Printf(format, wrap(a, false)...) -} - -// Logln is a convenience wrapper for log.Printf. -// -// Calling Logln(x, y) is equivalent to -// log.Println(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Logln(a ...interface{}) { - log.Println(wrap(a, true)...) -} - -// Print pretty-prints its operands and writes to standard output. -// -// Calling Print(x, y) is equivalent to -// fmt.Print(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Print(a ...interface{}) (n int, errno error) { - return fmt.Print(wrap(a, true)...) -} - -// Printf is a convenience wrapper for fmt.Printf. -// -// Calling Printf(f, x, y) is equivalent to -// fmt.Printf(f, Formatter(x), Formatter(y)). -func Printf(format string, a ...interface{}) (n int, errno error) { - return fmt.Printf(format, wrap(a, false)...) -} - -// Println pretty-prints its operands and writes to standard output. -// -// Calling Print(x, y) is equivalent to -// fmt.Println(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Println(a ...interface{}) (n int, errno error) { - return fmt.Println(wrap(a, true)...) -} - -// Sprint is a convenience wrapper for fmt.Sprintf. -// -// Calling Sprint(x, y) is equivalent to -// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Sprint(a ...interface{}) string { - return fmt.Sprint(wrap(a, true)...) -} - -// Sprintf is a convenience wrapper for fmt.Sprintf. -// -// Calling Sprintf(f, x, y) is equivalent to -// fmt.Sprintf(f, Formatter(x), Formatter(y)). -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, wrap(a, false)...) 
-} - -func wrap(a []interface{}, force bool) []interface{} { - w := make([]interface{}, len(a)) - for i, x := range a { - w[i] = formatter{v: reflect.ValueOf(x), force: force} - } - return w -} diff --git a/vendor/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go deleted file mode 100644 index abb5b6fc14..0000000000 --- a/vendor/github.com/kr/pretty/zero.go +++ /dev/null @@ -1,41 +0,0 @@ -package pretty - -import ( - "reflect" -) - -func nonzero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() != 0 - case reflect.Float32, reflect.Float64: - return v.Float() != 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() != complex(0, 0) - case reflect.String: - return v.String() != "" - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if nonzero(getField(v, i)) { - return true - } - } - return false - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if nonzero(v.Index(i)) { - return true - } - } - return false - case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: - return !v.IsNil() - case reflect.UnsafePointer: - return v.Pointer() != 0 - } - return true -} diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License deleted file mode 100644 index 480a328059..0000000000 --- a/vendor/github.com/kr/text/License +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme deleted file mode 100644 index 7e6e7c0687..0000000000 --- a/vendor/github.com/kr/text/Readme +++ /dev/null @@ -1,3 +0,0 @@ -This is a Go package for manipulating paragraphs of text. - -See http://go.pkgdoc.org/github.com/kr/text for full documentation. diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go deleted file mode 100644 index cf4c198f95..0000000000 --- a/vendor/github.com/kr/text/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package text provides rudimentary functions for manipulating text in -// paragraphs. 
-package text diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go deleted file mode 100644 index 4ebac45c09..0000000000 --- a/vendor/github.com/kr/text/indent.go +++ /dev/null @@ -1,74 +0,0 @@ -package text - -import ( - "io" -) - -// Indent inserts prefix at the beginning of each non-empty line of s. The -// end-of-line marker is NL. -func Indent(s, prefix string) string { - return string(IndentBytes([]byte(s), []byte(prefix))) -} - -// IndentBytes inserts prefix at the beginning of each non-empty line of b. -// The end-of-line marker is NL. -func IndentBytes(b, prefix []byte) []byte { - var res []byte - bol := true - for _, c := range b { - if bol && c != '\n' { - res = append(res, prefix...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// Writer indents each line of its input. -type indentWriter struct { - w io.Writer - bol bool - pre [][]byte - sel int - off int -} - -// NewIndentWriter makes a new write filter that indents the input -// lines. Each line is prefixed in order with the corresponding -// element of pre. If there are more lines than elements, the last -// element of pre is repeated for each subsequent line. -func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { - return &indentWriter{ - w: w, - pre: pre, - bol: true, - } -} - -// The only errors returned are from the underlying indentWriter. -func (w *indentWriter) Write(p []byte) (n int, err error) { - for _, c := range p { - if w.bol { - var i int - i, err = w.w.Write(w.pre[w.sel][w.off:]) - w.off += i - if err != nil { - return n, err - } - } - _, err = w.w.Write([]byte{c}) - if err != nil { - return n, err - } - n++ - w.bol = c == '\n' - if w.bol { - w.off = 0 - if w.sel < len(w.pre)-1 { - w.sel++ - } - } - } - return n, nil -} diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go deleted file mode 100644 index b09bb03736..0000000000 --- a/vendor/github.com/kr/text/wrap.go +++ /dev/null @@ -1,86 +0,0 @@ -package text - -import ( - "bytes" - "math" -) - -var ( - nl = []byte{'\n'} - sp = []byte{' '} -) - -const defaultPenalty = 1e5 - -// Wrap wraps s into a paragraph of lines of length lim, with minimal -// raggedness. -func Wrap(s string, lim int) string { - return string(WrapBytes([]byte(s), lim)) -} - -// WrapBytes wraps b into a paragraph of lines of length lim, with minimal -// raggedness. -func WrapBytes(b []byte, lim int) []byte { - words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) - var lines [][]byte - for _, line := range WrapWords(words, 1, lim, defaultPenalty) { - lines = append(lines, bytes.Join(line, sp)) - } - return bytes.Join(lines, nl) -} - -// WrapWords is the low-level line-breaking algorithm, useful if you need more -// control over the details of the text wrapping process. For most uses, either -// Wrap or WrapBytes will be sufficient and more convenient. -// -// WrapWords splits a list of words into lines with minimal "raggedness", -// treating each byte as one unit, accounting for spc units between adjacent -// words on each line, and attempting to limit lines to lim units. Raggedness -// is the total error over all lines, where error is the square of the -// difference of the length of the line and lim. Too-long lines (which only -// happen when a single word is longer than lim units) have pen penalty units -// added to the error. 
-func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
-	n := len(words)
-
-	length := make([][]int, n)
-	for i := 0; i < n; i++ {
-		length[i] = make([]int, n)
-		length[i][i] = len(words[i])
-		for j := i + 1; j < n; j++ {
-			length[i][j] = length[i][j-1] + spc + len(words[j])
-		}
-	}
-
-	nbrk := make([]int, n)
-	cost := make([]int, n)
-	for i := range cost {
-		cost[i] = math.MaxInt32
-	}
-	for i := n - 1; i >= 0; i-- {
-		if length[i][n-1] <= lim || i == n-1 {
-			cost[i] = 0
-			nbrk[i] = n
-		} else {
-			for j := i + 1; j < n; j++ {
-				d := lim - length[i][j-1]
-				c := d*d + cost[j]
-				if length[i][j-1] > lim {
-					c += pen // too-long lines get a worse penalty
-				}
-				if c < cost[i] {
-					cost[i] = c
-					nbrk[i] = j
-				}
-			}
-		}
-	}
-
-	var lines [][][]byte
-	i := 0
-	for i < n {
-		lines = append(lines, words[i:nbrk[i]])
-		i = nbrk[i]
-	}
-	return lines
-}
diff --git a/vendor/github.com/ligato/cn-infra/agent/agent.go b/vendor/github.com/ligato/cn-infra/agent/agent.go
new file mode 100644
index 0000000000..2bf9654e0e
--- /dev/null
+++ b/vendor/github.com/ligato/cn-infra/agent/agent.go
@@ -0,0 +1,269 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+	"errors"
+	"os"
+	"os/signal"
+
+	"github.com/ligato/cn-infra/config"
+	"github.com/ligato/cn-infra/infra"
+	"github.com/ligato/cn-infra/logging"
+	"github.com/ligato/cn-infra/utils/once"
+	"github.com/namsral/flag"
+)
+
+// Variables set by the compiler using ldflags
+var (
+	// BuildVersion describes version for the build. It is usually set using `git describe --always --tags --dirty`.
+	BuildVersion = "dev"
+	// BuildDate describes time of the build.
+	BuildDate string
+	// CommitHash describes commit hash for the build.
+	CommitHash string
+)
+
+// Agent implements startup & shutdown procedures for plugins.
+type Agent interface {
+	// Run is a blocking call which starts the agent with all of its plugins,
+	// waits for a signal from OS (SIGINT, SIGTERM by default), context cancellation or
+	// close of quit channel (can be set via options) and then stops the agent.
+	// Returns nil if all the plugins were initialized and closed successfully.
+	Run() error
+	// Start starts the agent with all the plugins, calling their Init() and optionally AfterInit().
+	// Returns nil if all the plugins were initialized successfully.
+	Start() error
+	// Stop stops the agent with all the plugins, calling their Close().
+	// Returns nil if all the plugins were closed successfully.
+	Stop() error
+	// Options returns all agent's options configured via constructor.
+	Options() Options
+
+	// Wait waits until agent is stopped and returns same error as Stop().
+	Wait() error
+	// After returns a channel that is closed before the agent is stopped.
+	// Note: It is not certain that all plugins are stopped; see Error().
+	After() <-chan struct{}
+	// Error returns an error that occurred when the agent was stopped.
+	// Note: This essentially just calls Stop().
+	Error() error
+}
+
+// NewAgent creates a new agent using given options and registers all flags
+// defined for plugins via config.ForPlugin.
+func NewAgent(opts ...Option) Agent {
+	options := newOptions(opts...)
+
+	if !flag.Parsed() {
+		for _, p := range options.Plugins {
+			name := p.String()
+			agentLogger.Debugf("registering flags for: %q", name)
+			config.RegisterFlagsFor(name)
+		}
+		if flag.Lookup(config.DirFlag) == nil {
+			flag.String(config.DirFlag, config.DirDefault, config.DirUsage)
+		}
+		flag.Parse()
+	}
+
+	return &agent{
+		opts: options,
+	}
+}
+
+type agent struct {
+	opts Options
+
+	stopCh chan struct{}
+
+	startOnce once.ReturnError
+	stopOnce  once.ReturnError
+}
+
+// Options returns the Options the agent was created with
+func (a *agent) Options() Options {
+	return a.opts
+}
+
+// Start starts the agent. Start will return as soon as the Agent is ready. The Agent continues
+// running after Start returns.
+func (a *agent) Start() error {
+	return a.startOnce.Do(a.startSignalWrapper)
+}
+
+// Stop stops the Agent. Calls Close() on all Plugins.
+func (a *agent) Stop() error {
+	return a.stopOnce.Do(a.stop)
+}
+
+// Run runs the agent. Run will not return until the agent is stopped,
+// typically by a quit signal (SIGINT or SIGTERM by default).
+func (a *agent) Run() error {
+	if err := a.Start(); err != nil {
+		return err
+	}
+	return a.Wait()
+}
+
+func (a *agent) startSignalWrapper() error {
+	logging.DefaultLogger.WithFields(logging.Fields{
+		"CommitHash": CommitHash,
+		"BuildDate":  BuildDate,
+	}).Infof("Starting agent %v", BuildVersion)
+
+	// If we want to properly handle cleanup when a SIG comes in *during*
+	// agent startup (i.e., clean up after it's finished), we need to register
+	// for the signal before we start() the agent
+	sig := make(chan os.Signal, 1)
+	if len(a.opts.QuitSignals) > 0 {
+		signal.Notify(sig, a.opts.QuitSignals...)
+	}
+
+	// If the agent started, we have things to clean up if there is a SIG,
+	// so fire off a goroutine to do that
+	if err := a.start(); err != nil {
+		signal.Stop(sig)
+		return err
+	}
+
+	go func() {
+		var quit <-chan struct{}
+		if a.opts.Context != nil {
+			quit = a.opts.Context.Done()
+		}
+		// Wait for signal or agent stop
+		select {
+		case <-a.opts.QuitChan:
+			logging.DefaultLogger.Info("Quit channel closed, stopping.")
+		case <-quit:
+			logging.DefaultLogger.Info("Context canceled, stopping.")
+		case s := <-sig:
+			logging.DefaultLogger.Infof("Signal %v received, stopping.", s)
+		case <-a.After():
+		}
+		// Doesn't hurt to call Stop twice, it's idempotent because of the
+		// stopOnce
+		a.Stop()
+		signal.Stop(sig)
+	}()
+
+	return nil
+}
+
+func (a *agent) start() error {
+	agentLogger.Debugf("starting %d plugins", len(a.opts.Plugins))
+
+	// Init plugins
+	for _, plugin := range a.opts.Plugins {
+		agentLogger.Debugf("=> Init(): %v", plugin)
+		if err := plugin.Init(); err != nil {
+			return err
+		}
+	}
+
+	// AfterInit plugins
+	for _, plugin := range a.opts.Plugins {
+		if postPlugin, ok := plugin.(infra.PostInit); ok {
+			agentLogger.Debugf("=> AfterInit(): %v", plugin)
+			if err := postPlugin.AfterInit(); err != nil {
+				return err
+			}
+		} else {
+			agentLogger.Debugf("-- plugin %v has no AfterInit()", plugin)
+		}
+	}
+
+	a.stopCh = make(chan struct{}) // If we are started, we have a stopCh to signal stopping
+
+	logging.DefaultLogger.Infof("Agent started with %d plugins", len(a.opts.Plugins))
+
+	return nil
+}
+
+func (a *agent) stop() error {
+	if a.stopCh == nil {
+		err := errors.New("attempted to stop an agent that wasn't Started")
+		logging.DefaultLogger.Error(err)
+		return err
+	}
+	defer close(a.stopCh)
+
+	// Close plugins
+	for _, p := range a.opts.Plugins {
+		agentLogger.Debugf("=> Close(): %v", p)
+		if err := p.Close(); err != nil {
+			return err
+		}
+	}
+
+	logging.DefaultLogger.Info("Agent stopped")
+
+	return nil
+}
+
+// Wait will not return until a quit signal (SIGINT or SIGTERM by default) is received
+// or the Agent is otherwise Stopped.
+// All Plugins are Closed() before Wait returns.
+func (a *agent) Wait() error {
+	if a.stopCh == nil {
+		err := errors.New("attempted to wait on an agent that wasn't Started")
+		logging.DefaultLogger.Error(err)
+		return err
+	}
+	<-a.stopCh
+
+	// If we get here, a.Stop() has already been called, and we are simply
+	// retrieving the error if any squirreled away by stopOnce
+	return a.Stop()
+}
+
+// After returns a channel that will be closed when the agent is Stopped.
+// To retrieve any error from the agent stopping, call Error() on the agent.
+// The normal pattern of use is:
+//
+//   agent := NewAgent(options...)
+//   agent.Start()
+//   select {
+//   case <-agent.After() // Will wait till the agent is stopped
+//   ...
+//   }
+//   err := agent.Error() // Will return any error from the agent being stopped
+//
+func (a *agent) After() <-chan struct{} {
+	if a.stopCh != nil {
+		return a.stopCh
+	}
+	// The agent didn't start, so we can't return a.stopCh,
+	// because *only* a.start() should allocate that.
+	// We won't return a nil channel, because nil channels
+	// block forever.
+	// Since the normal pattern is to call a.After() so you
+	// can select till the agent is done and a.Stop() to
+	// retrieve the error, returning a closed channel will preserve that
+	// usage, as a.Stop() returns an error complaining that the agent
+	// never started.
+	ch := make(chan struct{})
+	close(ch)
+	return ch
+}
+
+// Error returns any error that occurred when the agent was Stopped
+func (a *agent) Error() error {
+	// a.Stop() returns whatever error occurred when stopping the agent.
+	// This is because of stopOnce.
+	// If you try to retrieve an error before the agent is started, you will get
+	// an error complaining that the agent isn't started.
+	return a.Stop()
+}
diff --git a/vendor/github.com/ligato/cn-infra/agent/options.go b/vendor/github.com/ligato/cn-infra/agent/options.go
new file mode 100644
index 0000000000..24a19082c0
--- /dev/null
+++ b/vendor/github.com/ligato/cn-infra/agent/options.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+	"context"
+	"os"
+	"reflect"
+	"syscall"
+
+	"github.com/ligato/cn-infra/infra"
+)
+
+// Options specifies option list for the Agent
+type Options struct {
+	QuitSignals []os.Signal
+	QuitChan    chan struct{}
+	Context     context.Context
+	Plugins     []infra.Plugin
+
+	pluginMap   map[infra.Plugin]struct{}
+	pluginNames map[string]struct{}
+}
+
+func newOptions(opts ...Option) Options {
+	opt := Options{
+		QuitSignals: []os.Signal{
+			os.Interrupt,
+			syscall.SIGTERM,
+		},
+		pluginMap:   make(map[infra.Plugin]struct{}),
+		pluginNames: make(map[string]struct{}),
+	}
+
+	for _, o := range opts {
+		o(&opt)
+	}
+
+	return opt
+}
+
+// Option is a function that operates on an Agent's Options
+type Option func(*Options)
+
+// Version returns an Option that sets the build version, build date and commit hash of the Agent
+func Version(buildVer, buildDate, commitHash string) Option {
+	return func(o *Options) {
+		BuildVersion = buildVer
+		BuildDate = buildDate
+		CommitHash = commitHash
+	}
+}
+
+// Context returns an Option that sets the context for the Agent
+func Context(ctx context.Context) Option {
+	return func(o *Options) {
+		o.Context = ctx
+	}
+}
+
+// QuitSignals returns an Option that sets the signals which stop the Agent
+func QuitSignals(sigs ...os.Signal) Option {
+	return func(o *Options) {
+		o.QuitSignals = sigs
+	}
+}
+
+// QuitOnClose returns an Option that sets the channel which stops the Agent when closed
+func QuitOnClose(ch chan struct{}) Option {
+	return func(o *Options) {
+		o.QuitChan = ch
+	}
+}
+
+// Plugins creates an Option that adds a list of Plugins to the Agent's Plugin list
+func Plugins(plugins ...infra.Plugin) Option {
+	return func(o *Options) {
+		o.Plugins = append(o.Plugins, plugins...)
+	}
+}
+
+// AllPlugins creates an Option that adds all of the nested
+// plugins recursively to the Agent's plugin list.
+func AllPlugins(plugins ...infra.Plugin) Option {
+	return func(o *Options) {
+		agentLogger.Debugf("AllPlugins with %d plugins", len(plugins))
+
+		for _, plugin := range plugins {
+			typ := reflect.TypeOf(plugin)
+			agentLogger.Debugf("searching for all deps in: %v (type: %v)", plugin, typ)
+
+			foundPlugins, err := findPlugins(reflect.ValueOf(plugin), o.pluginMap)
+			if err != nil {
+				panic(err)
+			}
+
+			agentLogger.Debugf("found %d plugins in: %v (type: %v)", len(foundPlugins), plugin, typ)
+			for _, plug := range foundPlugins {
+				agentLogger.Debugf(" - plugin: %v (%v)", plug, reflect.TypeOf(plug))
+
+				if _, ok := o.pluginNames[plug.String()]; ok {
+					agentLogger.Fatalf("plugin with name %q already registered", plug.String())
+				}
+				o.pluginNames[plug.String()] = struct{}{}
+			}
+			o.Plugins = append(o.Plugins, foundPlugins...)
+
+			// TODO: perhaps set plugin name to typ.String() if it's empty
+			/*p, ok := plugin.(core.PluginNamed)
+			if !ok {
+				p = core.NamePlugin(typ.String(), plugin)
+			}*/
+
+			if _, ok := o.pluginNames[plugin.String()]; ok {
+				agentLogger.Fatalf("plugin with name %q already registered, custom name should be used", plugin.String())
+			}
+			o.pluginNames[plugin.String()] = struct{}{}
+			o.Plugins = append(o.Plugins, plugin)
+		}
+	}
+}
diff --git a/vendor/github.com/ligato/cn-infra/agent/plugin_lookup.go b/vendor/github.com/ligato/cn-infra/agent/plugin_lookup.go
new file mode 100644
index 0000000000..25ce154380
--- /dev/null
+++ b/vendor/github.com/ligato/cn-infra/agent/plugin_lookup.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+
+	"github.com/ligato/cn-infra/infra"
+	"github.com/ligato/cn-infra/logging"
+	"github.com/ligato/cn-infra/logging/logrus"
+)
+
+var agentLogger = logrus.NewLogger("agent")
+
+func init() {
+	if os.Getenv("DEBUG_INFRA") != "" {
+		agentLogger.SetLevel(logging.DebugLevel)
+		agentLogger.Debugf("agent debug logger enabled")
+	}
+}
+
+func findPlugins(val reflect.Value, uniqueness map[infra.Plugin]struct{}, x ...int) (
+	res []infra.Plugin, err error,
+) {
+	n := 0
+	if len(x) > 0 {
+		n = x[0]
+	}
+	var logf = func(f string, a ...interface{}) {
+		for i := 0; i < n; i++ {
+			f = "\t" + f
+		}
+		//agentLogger.Debugf(f, a...)
+		if agentLogger.GetLevel() == logging.DebugLevel {
+			fmt.Printf(f+"\n", a...)
+ } + } + + typ := val.Type() + + logf("=> %v (%v)", typ, typ.Kind()) + defer logf("== %v ", typ) + + if typ.Kind() == reflect.Interface { + if val.IsNil() { + logf(" - val is nil") + return nil, nil + } + val = val.Elem() + typ = val.Type() + //logf(" - interface to elem: %v (%v)", typ, val.Kind()) + } + + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + //logrus.DefaultLogger().Debug(" - typ ptr kind: ", typ) + } + if val.Kind() == reflect.Ptr { + val = val.Elem() + //logrus.DefaultLogger().Debug(" - val ptr kind: ", val) + } + + if !val.IsValid() { + logf(" - val is invalid") + return nil, nil + } + + if typ.Kind() != reflect.Struct { + logf(" - is not a struct: %v %v", typ.Kind(), val.Kind()) + return nil, nil + } + + //logf(" -> checking %d fields", typ.NumField()) + + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + + // PkgPath is empty for exported fields + if exported := field.PkgPath == ""; !exported { + continue + } + + fieldVal := val.Field(i) + + logf("-> field %d: %v - %v (%v)", i, field.Name, field.Type, fieldVal.Kind()) + + var fieldPlug infra.Plugin + + plug, implementsPlugin := isFieldPlugin(field, fieldVal) + if implementsPlugin { + if plug == nil { + logf(" - found nil plugin: %v", field.Name) + continue + } + + _, found := uniqueness[plug] + if found { + logf(" - found duplicate plugin: %v %v", field.Name, field.Type) + continue + } + + uniqueness[plug] = struct{}{} + /*p, ok := plug.(core.PluginNamed) + if !ok { + p = core.NamePlugin(field.Name, plug) + }*/ + fieldPlug = plug + + logf(" + FOUND PLUGIN: %v - %v (%v)", plug.String(), field.Name, field.Type) + + /*var pp core.Plugin = plug + if np, ok := p.(*core.NamedPlugin); ok { + pp = np.Plugin + }*/ + } + + // do recursive inspection only for plugins and fields Deps + if fieldPlug != nil || (field.Name == "Deps" && fieldVal.Kind() == reflect.Struct) { + //var l []core.PluginNamed + // try to inspect structure recursively + l, err := findPlugins(fieldVal, uniqueness, n+1) + if err != nil { + logf(" - Bad field: %v %v", field.Name, err) + continue + } + //logf(" - listed %v plugins from %v (%v)", len(l), field.Name, field.Type) + res = append(res, l...) 
+ } + + if fieldPlug != nil { + res = append(res, fieldPlug) + } + } + + logf("<- got %d plugins", len(res)) + + return res, nil +} + +var pluginType = reflect.TypeOf((*infra.Plugin)(nil)).Elem() + +func isFieldPlugin(field reflect.StructField, fieldVal reflect.Value) (infra.Plugin, bool) { + //logrus.DefaultLogger().Debugf(" - is field plugin: %v (%v) %v", field.Type, fieldVal.Kind(), fieldVal) + + switch fieldVal.Kind() { + case reflect.Struct: + ptrType := reflect.PtrTo(fieldVal.Type()) + if ptrType.Implements(pluginType) { + if fieldVal.CanAddr() { + if plug, ok := fieldVal.Addr().Interface().(infra.Plugin); ok { + return plug, true + } + } + return nil, true + } + case reflect.Ptr, reflect.Interface: + if plug, ok := fieldVal.Interface().(infra.Plugin); ok { + if fieldVal.IsNil() { + return nil, true + } + return plug, true + } + } + + return nil, false +} diff --git a/vendor/github.com/ligato/cn-infra/config/plugin_config.go b/vendor/github.com/ligato/cn-infra/config/plugin_config.go index 50aacb1653..dced622612 100644 --- a/vendor/github.com/ligato/cn-infra/config/plugin_config.go +++ b/vendor/github.com/ligato/cn-infra/config/plugin_config.go @@ -1,31 +1,35 @@ package config import ( + "fmt" "os" "path" "strings" "sync" - "github.com/ligato/cn-infra/logging/logrus" "github.com/namsral/flag" + + "github.com/ligato/cn-infra/logging/logrus" ) -// FlagSuffix is added to plugin name while loading plugins configuration. -const FlagSuffix = "-config" +const ( + // FlagSuffix is added to plugin name while loading plugins configuration. + FlagSuffix = "-config" -// EnvSuffix is added to plugin name while loading plugins configuration from ENV variable. -const EnvSuffix = "_CONFIG" + // EnvSuffix is added to plugin name while loading plugins configuration from ENV variable. + EnvSuffix = "_CONFIG" -// DirFlag as flag name (see implementation in declareFlags()) -// is used to define default directory where config files reside. -// This flag name is derived from the name of the plugin. -const DirFlag = "config-dir" + // DirFlag as flag name (see implementation in declareFlags()) + // is used to define default directory where config files reside. + // This flag name is derived from the name of the plugin. + DirFlag = "config-dir" -// DirDefault holds a default value "." for flag, which represents current working directory. -const DirDefault = "." + // DirDefault holds a default value "." for flag, which represents current working directory. + DirDefault = "." -// DirUsage used as a flag (see implementation in declareFlags()). -const DirUsage = "Location of the configuration files; also set via 'CONFIG_DIR' env variable." + // DirUsage used as a flag (see implementation in declareFlags()). + DirUsage = "Location of the config files; can also be set via 'CONFIG_DIR' env variable." +) // PluginConfig is API for plugins to access configuration. // @@ -41,6 +45,21 @@ type PluginConfig interface { GetConfigName() string } +// FlagSet is a type alias for flag.FlagSet. +type FlagSet = flag.FlagSet + +// pluginFlags is used for storing flags for Plugins before agent starts. +var pluginFlags = make(map[string]*FlagSet) + +// RegisterFlagsFor registers defined flags for plugin with given name. +func RegisterFlagsFor(name string) { + if plugSet, ok := pluginFlags[name]; ok { + plugSet.VisitAll(func(f *flag.Flag) { + flag.Var(f.Value, f.Name, f.Usage) + }) + } +} + // ForPlugin returns API that is injectable to a particular Plugin // and is used to read it's configuration. 
// @@ -49,36 +68,58 @@ type PluginConfig interface { // opts (used to define flag (if it was not already defined)): // - default value // - usage -func ForPlugin(pluginName string, opts ...string) PluginConfig { - flgName := pluginName + FlagSuffix - flg := flag.CommandLine.Lookup(flgName) - if flg == nil { - var flagDefault, flagUsage string - - if len(opts) > 0 && opts[0] != "" { - flagDefault = opts[0] - } else { - flagDefault = pluginName + ".conf" - } - if len(opts) > 1 && opts[1] != "" { - flagUsage = opts[1] - } else { - flagUsage = "Location of the " + pluginName + - " Client configuration file; also set via '" + - strings.ToUpper(pluginName) + EnvSuffix + "' env variable." - } - flag.String(flgName, flagDefault, flagUsage) +func ForPlugin(name string, moreFlags ...func(*FlagSet)) PluginConfig { + flagSet := flag.NewFlagSet(name, flag.ExitOnError) + + for _, more := range moreFlags { + more(flagSet) } - return &pluginConfig{pluginName: pluginName} + cfgFlag := name + FlagSuffix + if flagSet.Lookup(cfgFlag) == nil { + cfgFlagDefault := name + ".conf" + cfgFlagUsage := fmt.Sprintf( + "Location of the %q plugin config file; can also be set via %q env variable.", + cfgFlagDefault, strings.ToUpper(name)+EnvSuffix) + flagSet.String(cfgFlag, cfgFlagDefault, cfgFlagUsage) + } + + pluginFlags[name] = flagSet + + return &pluginConfig{ + configFlag: cfgFlag, + } } type pluginConfig struct { - pluginName string + configFlag string access sync.Mutex cfg string } +// Dir evaluates the flag DirFlag. It interprets "." as current working directory. +func Dir() (string, error) { + flg := flag.CommandLine.Lookup(DirFlag) + if flg != nil { + val := flg.Value.String() + if strings.HasPrefix(val, ".") { + cwd, err := os.Getwd() + if err != nil { + return cwd, err + } + + if len(val) > 1 { + return cwd + val[1:], nil + } + return cwd, nil + } + + return val, nil + } + + return "", nil +} + // GetValue binds the configuration to config method argument. func (p *pluginConfig) GetValue(config interface{}) (found bool, err error) { cfgName := p.GetConfigName() @@ -108,12 +149,9 @@ func (p *pluginConfig) GetConfigName() string { } func (p *pluginConfig) getConfigName() string { - flgName := p.pluginName + FlagSuffix - flg := flag.CommandLine.Lookup(flgName) + flg := flag.CommandLine.Lookup(p.configFlag) if flg != nil { - flgVal := flg.Value.String() - - if flgVal != "" { + if flgVal := flg.Value.String(); flgVal != "" { // if exist value from flag if _, err := os.Stat(flgVal); !os.IsNotExist(err) { return flgVal @@ -130,29 +168,5 @@ func (p *pluginConfig) getConfigName() string { } } } - return "" } - -// Dir evaluates the flag DirFlag. It interprets "." as current working directory. -func Dir() (string, error) { - flg := flag.CommandLine.Lookup(DirFlag) - if flg != nil { - val := flg.Value.String() - if strings.HasPrefix(val, ".") { - cwd, err := os.Getwd() - if err != nil { - return cwd, err - } - - if len(val) > 1 { - return cwd + val[1:], nil - } - return cwd, nil - } - - return val, nil - } - - return "", nil -} diff --git a/vendor/github.com/ligato/cn-infra/core/README.md b/vendor/github.com/ligato/cn-infra/core/README.md deleted file mode 100644 index 17dfad46bf..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/README.md +++ /dev/null @@ -1,19 +0,0 @@ -## CN-Infra Core - -The `core` package contains the CN-Infra Core that manages the startup -and graceful shutdown of CN-Infra based applications. The startup & -shutdown lifecycle is depicted in the sequence diagram below. 
The startup -and shutdown behavior is described in comments for the `Start()` and -`Stop()` functions in [agent_core.go](agent_core.go), and for the -`EventLoopWithInterrupt()`function in [event_loop.go](event_loop.go). - -![plugin lifecycle](../docs/imgs/plugin_lifecycle.png) - -The `core` package also defines the CN-Infra Core's [SPI](plugin_spi.go) -that must be implemented by each plugin (see [Guidelines](../docs/guidelines/PLUGIN_LIFECYCLE.md)). -The SPI is used by the Core to Init(), AfterInit() and Close() each plugin. - - - - - diff --git a/vendor/github.com/ligato/cn-infra/core/agent_core.go b/vendor/github.com/ligato/cn-infra/core/agent_core.go deleted file mode 100644 index 5cc9a63cf7..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/agent_core.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/namsral/flag" - - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" -) - -var ( - // DefaultMaxStartupTime defines maximal duration of start for agent. - DefaultMaxStartupTime = 15 * time.Second -) - -// Agent implements startup & shutdown procedures. -type Agent struct { - // plugin list - plugins []*NamedPlugin - logging.Logger - // The field is set before initialization of every plugin with its name. - currentlyProcessing string - // agent's stopwatch - timer Timer -} - -// Timer holds all startup times. -type Timer struct { - // The startup/initialization must take no longer that maxStartup. - MaxStartupTime time.Duration - // timers - agentStart time.Time - initStart time.Time - afterInitStart time.Time - // durations - init time.Duration - afterInit time.Duration -} - -// NewAgent returns a new instance of the Agent with plugins. Use options if needed: -// will be used to log messages related to the agent life-cycle, -// but not for the plugins themselves. -// puts a time limit on initialization of all provided plugins. -// Agent.Start() returns ErrPluginsInitTimeout error if one or more plugins fail -// to initialize inside the specified time limit. -// is a variable list of plugins to load. ListPluginsInFlavor() helper -// method can be used to obtain the list from a given flavor. 
-// -// Example 1 (existing flavor - or use alias rpc.NewAgent()): -// -// core.NewAgent(&FlavorRPC{}, core.WithTimeout(5 * time.Second), rpc.WithPlugins(func(flavor *FlavorRPC) []*core.NamedPlugins { -// return []*core.NamedPlugins{{"customization": &CustomPlugin{DependencyXY: &flavor.GRPC}}} -// }) -// -// Example 2 (custom flavor): -// -// core.NewAgent(&MyFlavor{}, core.WithTimeout(5 * time.Second), my.WithPlugins(func(flavor *MyFlavor) []*core.NamedPlugins { -// return []*core.NamedPlugins{{"customization": &CustomPlugin{DependencyXY: &flavor.XY}}} -// }) -func NewAgent(flavor Flavor, opts ...Option) *Agent { - maxStartup := DefaultMaxStartupTime - - var flavors []Flavor - if fs, ok := flavor.(flavorAggregator); ok { - flavors = fs.fs - } else { - flavors = []Flavor{flavor} - } - - flavor.Inject() - - var agentCoreLogger logging.Logger - plugins := flavor.Plugins() - - for _, opt := range opts { - switch opt.(type) { - case WithPluginsOpt: - plugins = append(plugins, opt.(WithPluginsOpt).Plugins(flavors...)...) - case *WithTimeoutOpt: - ms := opt.(*WithTimeoutOpt).Timeout - if ms > 0 { - maxStartup = ms - } - case *WithLoggerOpt: - agentCoreLogger = opt.(*WithLoggerOpt).Logger - } - } - - if logRegGet, ok := flavor.(logRegistryGetter); ok && logRegGet != nil { - logReg := logRegGet.LogRegistry() - if logReg != nil { - agentCoreLogger = logReg.NewLogger("agentcore") - } else { - agentCoreLogger = logrus.DefaultLogger() - } - } else { - agentCoreLogger = logrus.DefaultLogger() - } - - return &Agent{ - plugins: plugins, - Logger: agentCoreLogger, - timer: Timer{ - MaxStartupTime: maxStartup, - }, - } -} - -// NewAgentDeprecated older & deprecated version of a constructor -// Function returns a new instance of the Agent with plugins. -// will be used to log messages related to the agent life-cycle, -// but not for the plugins themselves. -// sets a time limit for initialization of all provided plugins. -// Agent.Start() returns ErrPluginsInitTimeout error if one or more plugins fail -// to initialize in the specified time limit. -// is a variable that holds a list of plugins to load. ListPluginsInFlavor() helper -// method can be used to obtain the list from a given flavor. -func NewAgentDeprecated(logger logging.Logger, maxStartup time.Duration, plugins ...*NamedPlugin) *Agent { - return &Agent{ - plugins: plugins, - Logger: logger, - timer: Timer{ - MaxStartupTime: maxStartup, - }, - } -} - -type logRegistryGetter interface { - // LogRegistry is a getter for log registry instance - LogRegistry() logging.Registry -} - -// Start starts/initializes all selected plugins. -// The first iteration tries to run Init() method on every plugin from the list. -// If any of the plugins fails to initialize (Init() returns non-nil error), -// the initialization is cancelled by calling Close() method for already initialized -// plugins in the reverse order. The encountered error is returned by this -// function as-is. -// The second iteration does the same for the AfterInit() method. The difference -// is that AfterInit() is an optional method (not required by the Plugin -// interface, only suggested by PostInit interface) and therefore not necessarily -// called on every plugin. -// The startup/initialization must take no longer than maxStartup time limit, -// otherwise ErrPluginsInitTimeout error is returned. -func (agent *Agent) Start() error { - agent.WithFields(logging.Fields{"CommitHash": CommitHash, "BuildDate": BuildDate}). 
- Infof("Starting agent %v", BuildVersion) - - if !flag.Parsed() { - flag.Parse() - } - - doneChannel := make(chan struct{}) - errChannel := make(chan error) - - agent.timer.agentStart = time.Now() - - go func() { - if err := agent.initPlugins(); err != nil { - errChannel <- err - return - } - - if err := agent.handleAfterInit(); err != nil { - errChannel <- err - return - } - - close(doneChannel) - }() - - // Block until all Plugins are initialized or timeout expires. - select { - case <-doneChannel: - agent.Infof("Agent started successfully, took %v (Init: %v, AfterInit: %v)", - agent.timer.init+agent.timer.afterInit, agent.timer.init, agent.timer.afterInit) - return nil - - case err := <-errChannel: - agent.Debugf("Agent Init took %v", agent.timer.init) - agent.Debugf("Agent AfterInit took %v", agent.timer.afterInit) - return err - - case <-time.After(agent.timer.MaxStartupTime): - if agent.timer.init == 0 { - agent.Infof("Agent Init took > %v", agent.timer.MaxStartupTime) - } else { - agent.Infof("Agent Init took %v", agent.timer.init) - agent.Infof("Agent AfterInit took > %v", agent.timer.MaxStartupTime) - } - return fmt.Errorf("plugin %s not completed before timeout", agent.currentlyProcessing) - } -} - -// Stop gracefully shuts down the Agent. It is called usually when the user -// interrupts the Agent from the EventLoopWithInterrupt(). -// -// This implementation tries to call Close() method on every plugin on the list -// in the reverse order. It continues even if some error occurred. -func (agent *Agent) Stop() error { - agent.Info("Stopping agent...") - - var errMsgs []string - for i := len(agent.plugins) - 1; i >= 0; i-- { - p := agent.plugins[i] - - agent.Debugf("Closing plugin: %s", p) - - if err := p.Plugin.Close(); err != nil { - agent.Warnf("plugin %s: Close failed: %v", p, err) - errMsgs = append(errMsgs, fmt.Sprintf("%s: %v", p, err)) - } - } - - agent.Info("Agent stopped") - - if len(errMsgs) > 0 { - return errors.New(strings.Join(errMsgs, ", ")) - } - - return nil -} - -// initPlugins calls Init() on all plugins in the list. -func (agent *Agent) initPlugins() error { - // Flag indicates that some of the plugins failed to initialize - var initPluginCounter int - var wasError error - - agent.timer.initStart = time.Now() - for index, plugin := range agent.plugins { - initPluginCounter = index - - // Set currently initialized plugin name. - agent.currentlyProcessing = plugin.String() - - // Skip all other plugins if some of them failed. - if wasError != nil { - agent.Warnf("plugin %s: Init skipped due to previous error", plugin) - continue - } - - pluginStartTime := time.Now() - if err := plugin.Init(); err != nil { - wasError = fmt.Errorf("plugin %s: Init failed: %v", plugin, err) - agent.WithField("took", time.Since(pluginStartTime)).Error(wasError) - } else { - agent.WithField("took", time.Since(pluginStartTime)).Infof("plugin %s: Init ok", plugin) - } - } - agent.timer.init = time.Since(agent.timer.initStart) - - if wasError != nil { - //Stop the plugins that are initialized - for i := initPluginCounter; i >= 0; i-- { - p := agent.plugins[i] - - agent.Debugf("Closing plugin: %s", p) - - if err := p.Close(); err != nil { - wasError = err - } - } - return wasError - } - - return nil -} - -// handleAfterInit calls the AfterInit handlers on plugins that can only -// finish their initialization after all other plugins have been initialized. 
-func (agent *Agent) handleAfterInit() error { - // Flag indicates that some of the plugins failed to after-initialize - var wasError error - - agent.timer.afterInitStart = time.Now() - for _, plugin := range agent.plugins { - // Set currently after-initialized plugin name. - agent.currentlyProcessing = plugin.String() - - // Skip all other plugins if some of them failed. - if wasError != nil { - agent.Warnf("plugin %s: AfterInit skipped due to previous error", plugin) - continue - } - - // Check if plugin implements AfterInit(). - if postPlugin, ok := plugin.Plugin.(PostInit); ok { - pluginStartTime := time.Now() - if err := postPlugin.AfterInit(); err != nil { - wasError = fmt.Errorf("plugin %s: AfterInit failed: %v", plugin, err) - agent.WithField("took", time.Since(pluginStartTime)).Error(wasError) - } else { - agent.WithField("took", time.Since(pluginStartTime)).Infof("plugin %s: AfterInit ok", plugin) - } - } else { - agent.Debugf("plugin %s: no AfterInit implement", plugin) - } - } - agent.timer.afterInit = time.Since(agent.timer.afterInitStart) - - if wasError != nil { - agent.Stop() - return wasError - } - - return nil -} diff --git a/vendor/github.com/ligato/cn-infra/core/doc.go b/vendor/github.com/ligato/cn-infra/core/doc.go deleted file mode 100644 index 2ff8cfb4f8..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package core manages the lifecycle of all plugins (start, graceful -// shutdown) and defines the core lifecycle SPI. The core lifecycle SPI -// must be implemented by each plugin. -package core diff --git a/vendor/github.com/ligato/cn-infra/core/event_loop.go b/vendor/github.com/ligato/cn-infra/core/event_loop.go deleted file mode 100644 index d8d89e6d49..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/event_loop.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -import ( - "os" - "os/signal" - "syscall" -) - -// EventLoopWithInterrupt starts an instance of the agent created with NewAgent(). -// Agent is stopped when is closed, a user interrupt (SIGINT), or a -// terminate signal (SIGTERM) is received. 
-func EventLoopWithInterrupt(agent *Agent, closeChan chan struct{}) error { - err := agent.Start() - if err != nil { - agent.Error("Error loading core: ", err) - return err - } - - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, os.Interrupt) - signal.Notify(sigChan, syscall.SIGTERM) - select { - case <-sigChan: - agent.Println("Interrupt received, returning.") - case <-closeChan: - } - - err = agent.Stop() - if err != nil { - agent.Errorf("Agent stop error '%+v'", err) - } - return err -} diff --git a/vendor/github.com/ligato/cn-infra/core/list_flavor_plugin.go b/vendor/github.com/ligato/cn-infra/core/list_flavor_plugin.go deleted file mode 100644 index d7366d1159..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/list_flavor_plugin.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -import ( - "errors" - "os" - "reflect" - - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" -) - -var flavorLogger = logrus.NewLogger("flavors") - -func init() { - if os.Getenv("DEBUG_FLAVORS") != "" { - flavorLogger.SetLevel(logging.DebugLevel) - } -} - -// Flavor is a structure that contains a particular combination of plugins -// (fields of plugins). -type Flavor interface { - // Plugins returns a list of plugins. - // Name of the plugin is supposed to be related to field name of Flavor struct. - Plugins() []*NamedPlugin - - // Inject method is supposed to be implemented by each Flavor - // to inject dependencies between the plugins. - Injector - - // LogRegistry is a getter for accessing log registry (that allows to create new loggers) - LogRegistry() logging.Registry -} - -// Injector is simple interface reused at least on two places: -// - Flavor -// - NewAgent constructor WithPlugins() option -type Injector interface { - // When this method is called for the first time it returns true - // (meaning the dependency injection ran at the first time). - // It is possible to call this method repeatedly (then it will return false). - Inject() (firstRun bool) -} - -// ListPluginsInFlavor lists plugins in a Flavor. -// It extracts all plugins and returns them as a slice of NamedPlugins. -func ListPluginsInFlavor(flavor Flavor) (plugins []*NamedPlugin) { - uniqueness := map[Plugin]interface{}{} - l, err := listPluginsInFlavor(reflect.ValueOf(flavor), uniqueness) - if err != nil { - flavorLogger.Error("Invalid argument - it does not satisfy the Flavor interface") - } - return l -} - -// listPluginsInFlavor lists all plugins in a Flavor. A Flavor is composed -// of one or more Plugins and (optionally) multiple Inject. The composition -// is recursive: a component Flavor contains Plugin components and may -// contain Flavor components as well. The function recursively lists -// plugins included in component Inject. -// -// The function returns an error if the flavorValue argument does not -// satisfy the Flavor interface. 
All components in the argument flavorValue -// must satisfy either the Plugin or the Flavor interface. If they do not, -// an error is logged, but the function does not return an error. -func listPluginsInFlavor(flavorValue reflect.Value, uniqueness map[Plugin]interface{}) ([]*NamedPlugin, error) { - flavorLogger.Debug("inspect flavor structure ", flavorValue.Type()) - - var res []*NamedPlugin - - flavorType := flavorValue.Type() - - if flavorType.Kind() == reflect.Ptr { - flavorType = flavorType.Elem() - } - - if flavorValue.Kind() == reflect.Ptr { - flavorValue = flavorValue.Elem() - } - - if !flavorValue.IsValid() { - return res, nil - } - - if _, ok := flavorValue.Addr().Interface().(Flavor); !ok { - return res, errors.New("does not satisfy the Flavor interface") - } - - pluginType := reflect.TypeOf((*Plugin)(nil)).Elem() - - if flavorType.Kind() == reflect.Struct { - numField := flavorType.NumField() - for i := 0; i < numField; i++ { - field := flavorType.Field(i) - - exported := field.PkgPath == "" // PkgPath is empty for exported fields - if !exported { - continue - } - - fieldVal := flavorValue.Field(i) - plug, implementsPlugin := fieldPlugin(field, fieldVal, pluginType) - if implementsPlugin { - if plug != nil { - _, found := uniqueness[plug] - if !found { - uniqueness[plug] = nil - res = append(res, &NamedPlugin{PluginName: PluginName(field.Name), Plugin: plug}) - - flavorLogger. - WithField("fieldName", field.Name). - Debug("Found plugin in flavor ", field.Type) - } else { - flavorLogger. - WithField("fieldName", field.Name). - Debug("Found plugin in flavor with non unique name") - } - } else { - flavorLogger. - WithField("fieldName", field.Name). - Debug("Found nil plugin in flavor") - } - } else { - // try to inspect flavor structure recursively - l, err := listPluginsInFlavor(fieldVal, uniqueness) - if err != nil { - flavorLogger. - WithField("fieldName", field.Name). - Error("Bad field: must satisfy either Plugin or Flavor interface") - } else { - res = append(res, l...) - } - } - } - } - - return res, nil -} - -// fieldPlugin determines if a given field satisfies the Plugin interface. -// If yes, the plugin value is returned; if not, nil is returned. -func fieldPlugin(field reflect.StructField, fieldVal reflect.Value, pluginType reflect.Type) ( - plugin Plugin, implementsPlugin bool) { - - switch fieldVal.Kind() { - case reflect.Struct: - ptrType := reflect.PtrTo(fieldVal.Type()) - if ptrType.Implements(pluginType) { - if fieldVal.CanAddr() { - if plug, ok := fieldVal.Addr().Interface().(Plugin); ok { - return plug, true - } - } - return nil, true - } - case reflect.Ptr, reflect.Interface: - if plug, ok := fieldVal.Interface().(Plugin); ok { - if fieldVal.IsNil() { - flavorLogger.WithField("fieldName", field.Name). - Debug("Field is nil ", pluginType) - return nil, true - } - return plug, true - } - - } - return nil, false -} - -// Inject is a utility if you need to combine multiple flavorAggregator for in first parameter of NewAgent() -// It calls Inject() on every plugin. -// -// Example: -// -// NewAgent(Inject(&Flavor1{}, &Flavor2{})) -// -func Inject(fs ...Flavor) Flavor { - ret := flavorAggregator{fs} - ret.Inject() - return ret -} - -type flavorAggregator struct { - fs []Flavor -} - -// Plugins returns list of plugins af all flavorAggregator -func (flavors flavorAggregator) Plugins() []*NamedPlugin { - var ret []*NamedPlugin - for _, f := range flavors.fs { - ret = appendDiff(ret, f.Plugins()...) 
- } - return ret -} - -// Inject returns true if at least one returned true -func (flavors flavorAggregator) Inject() (firstRun bool) { - ret := false - for _, f := range flavors.fs { - ret = ret || f.Inject() - } - return ret -} - -// LogRegistry is a getter for accessing log registry of first flavor -func (flavors flavorAggregator) LogRegistry() logging.Registry { - if len(flavors.fs) > 0 { - flavors.fs[0].LogRegistry() - } - - return nil -} - -// Do not append plugins contained in multiple flavors -func appendDiff(existing []*NamedPlugin, new ...*NamedPlugin) []*NamedPlugin { - for _, newPlugin := range new { - exists := false - for _, existingPlugin := range existing { - if newPlugin.PluginName == existingPlugin.PluginName { - flavorLogger.Debugf("duplicate of plugin skipped %v", newPlugin.PluginName) - exists = true - break - } - } - if !exists { - existing = append(existing, newPlugin) - } - } - return existing -} diff --git a/vendor/github.com/ligato/cn-infra/core/name.go b/vendor/github.com/ligato/cn-infra/core/name.go deleted file mode 100644 index e66988be0e..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/name.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -// PluginName is a part of the plugin's API and it is supposed -// to be defined as a publicly accessible string constant. -// It is used to obtain the appropriate instance of the registry -// (there are multiple instances). -type PluginName string - -// NamedPlugin represents a Plugin with a name. -type NamedPlugin struct { - PluginName - Plugin -} - -// String returns the PluginName. -func (np *NamedPlugin) String() string { - return string(np.PluginName) -} diff --git a/vendor/github.com/ligato/cn-infra/core/options.go b/vendor/github.com/ligato/cn-infra/core/options.go deleted file mode 100644 index 9554f36e38..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/options.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -import ( - "time" - - "github.com/ligato/cn-infra/logging" -) - -// Option defines the maximum time for which the notification delivery is attempted. -type Option interface { - //OptionMarkerCore is just for marking implementation that implements this interface. 
- OptionMarkerCore() -} - -// WithTimeoutOpt defines the maximum time for which the notification delivery is attempted. -type WithTimeoutOpt struct { - Timeout time.Duration -} - -// WithTimeout creates an option for ToChan function that defines a notification delivery timeout. -func WithTimeout(timeout time.Duration) *WithTimeoutOpt { - return &WithTimeoutOpt{Timeout: timeout} -} - -// OptionMarkerCore is only for marking implementation that implements this interface. -func (marker *WithTimeoutOpt) OptionMarkerCore() {} - -// WithLoggerOpt defines a logger that logs if notification delivery is unsuccessful. -type WithLoggerOpt struct { - Logger logging.Logger -} - -// WithLogger creates an option for ToChan function that specifies a logger to be used. -func WithLogger(logger logging.Logger) *WithLoggerOpt { - return &WithLoggerOpt{Logger: logger} -} - -// OptionMarkerCore is just for marking implementation that implements this interface. -func (marker *WithLoggerOpt) OptionMarkerCore() {} - -// WithPluginsOpt is used in NewAgent() -type WithPluginsOpt interface { - Option - - // return list named plugins with injected dependencies - // the order in list impacts the order of Init(), AfterInit(), Close() sequence - Plugins(...Flavor) []*NamedPlugin -} - -// WithPlugin for adding a custom plugins to the Agent -// -// Example: -// -// flavor := &MyFlavor{} -// flavor.Inject() -// NewAgent(myFlavor, WithPlugin("my-plugin", &MyPlugin{DependencyXY: &flavor.ETCD})) -// })) -func WithPlugin(pluginName string, plugin Plugin) WithPluginsOpt { - return &withPluginOpt{&NamedPlugin{PluginName(pluginName), plugin}} -} - -// WithPlugin -type withPluginOpt struct { - plugin *NamedPlugin -} - -// Plugins is just for marking implementation that it implements this interface -func (opt *withPluginOpt) Plugins(...Flavor) []*NamedPlugin { - return []*NamedPlugin{opt.plugin} -} - -// OptionMarkerCore is just for marking implementation that it implements this interface -func (opt *withPluginOpt) OptionMarkerCore() {} diff --git a/vendor/github.com/ligato/cn-infra/core/plugin_spi.go b/vendor/github.com/ligato/cn-infra/core/plugin_spi.go deleted file mode 100644 index d36af0bdf1..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/plugin_spi.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -// Plugin interface defines plugin's basic life-cycle methods. -type Plugin interface { - // Init is called in the agent`s startup phase. - Init() error - // Close is called in the agent`s cleanup phase. - Close() error -} - -// PostInit interface defines an optional method for plugins with complex initialization. -type PostInit interface { - // AfterInit is called once Init() of all plugins have returned without error. 
- AfterInit() error -} diff --git a/vendor/github.com/ligato/cn-infra/core/version.go b/vendor/github.com/ligato/cn-infra/core/version.go deleted file mode 100644 index 11c026294a..0000000000 --- a/vendor/github.com/ligato/cn-infra/core/version.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -// Variables set by the compiler using ldflags -var ( - // BuildVersion describes version for the build. It is usually set using `git describe --always --tags --dirty`. - BuildVersion string - // BuildDate describes time of the build. - BuildDate string - // CommitHash describes commit hash for the build. - CommitHash string -) diff --git a/vendor/github.com/ligato/cn-infra/datasync/aggregator.go b/vendor/github.com/ligato/cn-infra/datasync/aggregator.go index 1ffcc46f47..3ce71c6ea4 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/aggregator.go +++ b/vendor/github.com/ligato/cn-infra/datasync/aggregator.go @@ -44,9 +44,11 @@ type AggregatedRegistration struct { // The function implements KeyValProtoWatcher.Watch(). func (ta *CompositeKVProtoWatcher) Watch(resyncName string, changeChan chan ChangeEvent, resyncChan chan ResyncEvent, keyPrefixes ...string) (WatchRegistration, error) { + var registrations []WatchRegistration - for _, transport := range ta.Adapters { - watcherReg, err := transport.Watch(resyncName, changeChan, resyncChan, keyPrefixes...) + + for _, adapter := range ta.Adapters { + watcherReg, err := adapter.Watch(resyncName, changeChan, resyncChan, keyPrefixes...) if err != nil { return nil, err } diff --git a/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_bytes_txn.go b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_bytes_txn.go index 72ce9d40d3..b0742f61a0 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_bytes_txn.go +++ b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_bytes_txn.go @@ -22,45 +22,54 @@ import ( "github.com/ligato/cn-infra/db/keyval" ) -// NewBytesTxn is a constructor. -func NewBytesTxn(commit func(map[string] /*key*/ datasync.ChangeValue) error) *BytesTxn { - return &BytesTxn{items: map[string] /*key*/ *BytesTxnItem{}, commit: commit} +// BytesTxnItem is used in BytesTxn. +type BytesTxnItem struct { + Data []byte + Delete bool } // BytesTxn is just a concurrent map of Bytes messages. // The intent is to collect the user data and propagate them when commit happens. type BytesTxn struct { - items map[string] /*key*/ *BytesTxnItem access sync.Mutex - commit func(map[string] /*key*/ datasync.ChangeValue) error + items map[string]*BytesTxnItem + commit func(map[string]datasync.ChangeValue) error } -//Put adds store operation into transaction. +// NewBytesTxn is a constructor. 
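+// The commit callback is invoked once from Commit with all collected changes.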
+func NewBytesTxn(commit func(map[string]datasync.ChangeValue) error) *BytesTxn { + return &BytesTxn{ + items: make(map[string]*BytesTxnItem), + commit: commit, + } +} + +// Put adds store operation into transaction. func (txn *BytesTxn) Put(key string, data []byte) keyval.BytesTxn { txn.access.Lock() defer txn.access.Unlock() - txn.items[key] = &BytesTxnItem{data, false} + txn.items[key] = &BytesTxnItem{Data: data} return txn } -//Delete add delete operation into transaction. +// Delete add delete operation into transaction. func (txn *BytesTxn) Delete(key string) keyval.BytesTxn { txn.access.Lock() defer txn.access.Unlock() - txn.items[key] = &BytesTxnItem{nil, true} + txn.items[key] = &BytesTxnItem{Delete: true} return txn } -//Commit executes the transaction. +// Commit executes the transaction. func (txn *BytesTxn) Commit() error { txn.access.Lock() defer txn.access.Unlock() - kvs := map[string] /*key*/ datasync.ChangeValue{} + kvs := map[string]datasync.ChangeValue{} for key, item := range txn.items { changeType := datasync.Put if item.Delete { @@ -71,9 +80,3 @@ func (txn *BytesTxn) Commit() error { } return txn.commit(kvs) } - -// BytesTxnItem is used in BytesTxn. -type BytesTxnItem struct { - Data []byte - Delete bool -} diff --git a/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_proto_txn.go b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_proto_txn.go index a037bae9ed..16e1405426 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_proto_txn.go +++ b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/local/local_proto_txn.go @@ -23,45 +23,62 @@ import ( "github.com/ligato/cn-infra/db/keyval" ) -// NewProtoTxn is a constructor. -func NewProtoTxn(commit func(map[string] /*key*/ datasync.ChangeValue) error) *ProtoTxn { - return &ProtoTxn{items: map[string] /*key*/ *ProtoTxnItem{}, commit: commit} +// ProtoTxnItem is used in ProtoTxn. +type ProtoTxnItem struct { + Data proto.Message + Delete bool } // ProtoTxn is a concurrent map of proto messages. // The intent is to collect the user data and propagate them when commit happens. type ProtoTxn struct { - items map[string] /*key*/ *ProtoTxnItem access sync.Mutex - commit func(map[string] /*key*/ datasync.ChangeValue) error + items map[string]*ProtoTxnItem + commit func(map[string]datasync.ChangeValue) error +} + +// NewProtoTxn is a constructor. +func NewProtoTxn(commit func(map[string]datasync.ChangeValue) error) *ProtoTxn { + return &ProtoTxn{ + items: make(map[string]*ProtoTxnItem), + commit: commit, + } } -//Put adds store operation into transaction. +// GetValue returns the value of the pair. +func (lazy *ProtoTxnItem) GetValue(out proto.Message) error { + if lazy.Data != nil { + proto.Merge(out, lazy.Data) + } + return nil +} + +// Put adds store operation into transaction. func (txn *ProtoTxn) Put(key string, data proto.Message) keyval.ProtoTxn { txn.access.Lock() defer txn.access.Unlock() - txn.items[key] = &ProtoTxnItem{data, false} + txn.items[key] = &ProtoTxnItem{Data: data} return txn } -//Delete adds delete operation into transaction. +// Delete adds delete operation into transaction. func (txn *ProtoTxn) Delete(key string) keyval.ProtoTxn { txn.access.Lock() defer txn.access.Unlock() - txn.items[key] = &ProtoTxnItem{nil, true} + txn.items[key] = &ProtoTxnItem{Delete: true} return txn } -//Commit executes the transaction. +// Commit executes the transaction. 
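+// Buffered items are turned into datasync change values (Put or Delete) and handed to the commit callback.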
func (txn *ProtoTxn) Commit() error { txn.access.Lock() defer txn.access.Unlock() - kvs := map[string] /*key*/ datasync.ChangeValue{} + kvs := map[string]datasync.ChangeValue{} for key, item := range txn.items { changeType := datasync.Put if item.Delete { @@ -72,17 +89,3 @@ func (txn *ProtoTxn) Commit() error { } return txn.commit(kvs) } - -// ProtoTxnItem is used in ProtoTxn. -type ProtoTxnItem struct { - Data proto.Message - Delete bool -} - -// GetValue returns the value of the pair. -func (lazy *ProtoTxnItem) GetValue(out proto.Message) error { - if lazy.Data != nil { - proto.Merge(out, lazy.Data) - } - return nil -} diff --git a/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/options.go b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/options.go new file mode 100644 index 0000000000..490739ccf6 --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/options.go @@ -0,0 +1,59 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvdbsync + +import ( + "fmt" + + "github.com/ligato/cn-infra/infra" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/servicelabel" +) + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "kvdb" + p.ServiceLabel = &servicelabel.DefaultPlugin + + for _, o := range opts { + o(p) + } + + prefix := p.String() + if p.Deps.KvPlugin != nil { + if kvdb, ok := p.Deps.KvPlugin.(fmt.Stringer); ok { + prefix = kvdb.String() + } + } + p.Deps.PluginName = infra.PluginName(prefix + "-datasync") + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Plugin) + +// UseDeps returns Option that can inject custom dependencies. 
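+// The callback receives a pointer to Deps, so any dependency can be replaced before Init runs.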
+func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} diff --git a/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/plugin_impl_dbsync.go b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/plugin_impl_dbsync.go index 94767884e4..8cc114bfd6 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/plugin_impl_dbsync.go +++ b/vendor/github.com/ligato/cn-infra/datasync/kvdbsync/plugin_impl_dbsync.go @@ -15,14 +15,15 @@ package kvdbsync import ( - "errors" + "fmt" "github.com/golang/protobuf/proto" "github.com/ligato/cn-infra/datasync" "github.com/ligato/cn-infra/datasync/resync" "github.com/ligato/cn-infra/datasync/syncbase" "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/flavors/local" + "github.com/ligato/cn-infra/infra" + "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/servicelabel" ) @@ -36,34 +37,14 @@ type Plugin struct { registry *syncbase.Registry } -type infraDeps interface { - // InfraDeps for getting PlugginInfraDeps instance (logger, config, plugin name, statuscheck) - InfraDeps(pluginName string, opts ...local.InfraDepsOpts) *local.PluginInfraDeps -} - -// OfDifferentAgent allows accessing DB of a different agent (with a particular microservice label). -// This method is a shortcut to simplify creating new instance of a plugin -// that is supposed to watch different agent DB. -// Method intentionally copies instance of a plugin (assuming it has set all dependencies) -// and sets microservice label. -func (plugin /*intentionally without pointer receiver*/ Plugin) OfDifferentAgent( - microserviceLabel string, infraDeps infraDeps) *Plugin { - - // plugin name suffixed by micorservice label - plugin.Deps.PluginInfraDeps = *infraDeps.InfraDeps(string( - plugin.Deps.PluginInfraDeps.PluginName) + "-" + microserviceLabel) - - // this is important - here comes microservice label of different agent - plugin.Deps.PluginInfraDeps.ServiceLabel = servicelabel.OfDifferentAgent(microserviceLabel) - return &plugin // copy (no pointer receiver) -} - // Deps groups dependencies injected into the plugin so that they are // logically separated from other plugin fields. type Deps struct { - local.PluginInfraDeps // inject - ResyncOrch resync.Subscriber // inject - KvPlugin keyval.KvProtoPlugin // inject + infra.PluginName // inject + Log logging.PluginLogger // inject + ServiceLabel servicelabel.ReaderAPI + KvPlugin keyval.KvProtoPlugin // inject + ResyncOrch resync.Subscriber // inject } // Init only initializes plugin.registry. @@ -80,7 +61,24 @@ func (plugin *Plugin) Init() error { // The order of plugins in flavor is not important to resync // since Watch() is called in Plugin.Init() and Resync.Register() // is called in Plugin.AfterInit(). +// +// If provided connection is not ready (not connected), AfterInit starts new goroutine in order to +// 'wait' for the connection. After that, the new transport watcher is built as usual. 
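+// Until then, Put and Delete report that the transport adapter is not ready.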
func (plugin *Plugin) AfterInit() error { + if plugin.KvPlugin == nil || plugin.KvPlugin.Disabled() { + return nil + } + // Define function executed on kv plugin connection + plugin.KvPlugin.OnConnect(func() error { + if err := plugin.initKvPlugin(); err != nil { + return fmt.Errorf("init KV plugin %v failed: %v", plugin.KvPlugin.String(), err) + } + return nil + }) + return nil +} + +func (plugin *Plugin) initKvPlugin() error { if plugin.KvPlugin != nil && !plugin.KvPlugin.Disabled() { db := plugin.KvPlugin.NewBroker(plugin.ServiceLabel.GetAgentPrefix()) dbW := plugin.KvPlugin.NewWatcher(plugin.ServiceLabel.GetAgentPrefix()) @@ -125,7 +123,7 @@ func (plugin *Plugin) Put(key string, data proto.Message, opts ...datasync.PutOp return plugin.adapter.db.Put(key, data, opts...) } - return errors.New("Transport adapter is not ready yet. (Probably called before AfterInit)") + return fmt.Errorf("transport adapter is not ready yet. (Probably called before AfterInit)") } // Delete propagates this call to a particular kvdb.Plugin unless the kvdb.Plugin is Disabled(). @@ -140,18 +138,10 @@ func (plugin *Plugin) Delete(key string, opts ...datasync.DelOption) (existed bo return plugin.adapter.db.Delete(key, opts...) } - return false, errors.New("Transport adapter is not ready yet. (Probably called before AfterInit)") + return false, fmt.Errorf("transport adapter is not ready yet. (Probably called before AfterInit)") } // Close resources. func (plugin *Plugin) Close() error { return nil } - -// String returns Deps.PluginName if set, "kvdbsync" otherwise. -func (plugin *Plugin) String() string { - if len(plugin.PluginName) == 0 { - return "kvdbsync" - } - return string(plugin.PluginName) -} diff --git a/vendor/github.com/ligato/cn-infra/datasync/msgsync/options.go b/vendor/github.com/ligato/cn-infra/datasync/msgsync/options.go new file mode 100644 index 0000000000..dfe66e11d9 --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/datasync/msgsync/options.go @@ -0,0 +1,57 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package msgsync + +import ( + "github.com/ligato/cn-infra/config" + "github.com/ligato/cn-infra/logging" +) + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "msgsync" + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + if p.Deps.PluginConfig == nil { + p.Deps.PluginConfig = config.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Plugin) + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} + +// UseConf returns Option which injects a particular configuration. 
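+// The injected Cfg provides the starting values; AfterInit may still overlay fields read from the plugin config file.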
+func UseConf(conf Cfg) Option { + return func(p *Plugin) { + p.Cfg = conf + } +} diff --git a/vendor/github.com/ligato/cn-infra/datasync/msgsync/plugin_impl_msgsync.go b/vendor/github.com/ligato/cn-infra/datasync/msgsync/plugin_impl_msgsync.go index f261691f0f..1af318011f 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/msgsync/plugin_impl_msgsync.go +++ b/vendor/github.com/ligato/cn-infra/datasync/msgsync/plugin_impl_msgsync.go @@ -19,23 +19,24 @@ import ( "github.com/golang/protobuf/proto" "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/flavors/local" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/messaging" ) -// PubPlugin implements KeyProtoValWriter that propagates protobuf messages +// Plugin implements KeyProtoValWriter that propagates protobuf messages // to a particular topic (unless the messaging.Mux is not disabled). -type PubPlugin struct { - Deps // inject +type Plugin struct { + Deps // inject + + Cfg adapter messaging.ProtoPublisher } // Deps groups dependencies injected into the plugin so that they are // logically separated from other plugin fields. type Deps struct { - local.PluginInfraDeps // inject - Messaging messaging.Mux // inject - Cfg + infra.Deps + Messaging messaging.Mux // inject } // Cfg groups configurations fields. It can be extended with other fields @@ -45,14 +46,14 @@ type Cfg struct { } // Init does nothing. -func (plugin *PubPlugin) Init() error { +func (plugin *Plugin) Init() error { return nil } // AfterInit uses provided MUX connection to build new publisher. -func (plugin *PubPlugin) AfterInit() error { +func (plugin *Plugin) AfterInit() error { if !plugin.Messaging.Disabled() { - cfg := plugin.Deps.Cfg + cfg := plugin.Cfg plugin.PluginConfig.GetValue(&cfg) if cfg.Topic != "" { @@ -70,7 +71,7 @@ func (plugin *PubPlugin) AfterInit() error { // Put propagates this call to a particular messaging Publisher. // // This method is supposed to be called in PubPlugin.AfterInit() or later (even from different go routine). -func (plugin *PubPlugin) Put(key string, data proto.Message, opts ...datasync.PutOption) error { +func (plugin *Plugin) Put(key string, data proto.Message, opts ...datasync.PutOption) error { if plugin.Messaging.Disabled() { return nil } @@ -83,14 +84,6 @@ func (plugin *PubPlugin) Put(key string, data proto.Message, opts ...datasync.Pu } // Close resources. -func (plugin *PubPlugin) Close() error { +func (plugin *Plugin) Close() error { return nil } - -// String returns Deps.PluginName if set, "pub-msgsync" otherwise. -func (plugin *PubPlugin) String() string { - if len(plugin.PluginName) == 0 { - return "pub-msgsync" - } - return string(plugin.PluginName) -} diff --git a/vendor/github.com/ligato/cn-infra/datasync/resync/event.go b/vendor/github.com/ligato/cn-infra/datasync/resync/event.go index 4e065ff170..8ac331100c 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/resync/event.go +++ b/vendor/github.com/ligato/cn-infra/datasync/resync/event.go @@ -17,11 +17,12 @@ package resync // Status used in the events. type Status string -// Started means that the Resync has started. -const Started Status = "Started" - -// NotActive means that Resync has not started yet or it has been finished. -const NotActive Status = "NotActive" +const ( + // Started means that the Resync has started. + Started Status = "Started" + // NotActive means that Resync has not started yet or it has been finished. + NotActive Status = "NotActive" +) // StatusEvent is the base type that will be propagated to the channel. 
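 // It delivers the current resync Status to subscribed plugins.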
type StatusEvent interface { diff --git a/vendor/github.com/ligato/cn-infra/datasync/resync/options.go b/vendor/github.com/ligato/cn-infra/datasync/resync/options.go new file mode 100644 index 0000000000..c450056674 --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/datasync/resync/options.go @@ -0,0 +1,35 @@ +package resync + +import ( + "github.com/ligato/cn-infra/logging" +) + +// DefaultPlugin is a default instance of Plugin. +var DefaultPlugin = *NewPlugin() + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "resync" + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Plugin) + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} diff --git a/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_api_resync.go b/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_api_resync.go index ef0c46932c..acf3df0052 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_api_resync.go +++ b/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_api_resync.go @@ -14,9 +14,7 @@ package resync -import ( - "github.com/ligato/cn-infra/core" -) +import "github.com/ligato/cn-infra/infra" // Subscriber is an API used by plugins to register for notifications from the // RESYNC Orcherstrator. @@ -33,5 +31,5 @@ type Subscriber interface { type Reporter interface { // ReportError is called by Plugins when the binary api call was not successful. // Based on that the Resync Orchestrator starts the Resync. - ReportError(name core.PluginName, err error) + ReportError(name infra.PluginName, err error) } diff --git a/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_impl_resync.go b/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_impl_resync.go index ce2ff3e8d3..da34b970d4 100644 --- a/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_impl_resync.go +++ b/vendor/github.com/ligato/cn-infra/datasync/resync/plugin_impl_resync.go @@ -18,7 +18,8 @@ import ( "sync" "time" - "github.com/ligato/cn-infra/flavors/local" + "github.com/ligato/cn-infra/infra" + "github.com/ligato/cn-infra/logging" ) const ( @@ -37,7 +38,8 @@ type Plugin struct { // Deps groups dependencies injected into the plugin so that they are // logically separated from other plugin fields. type Deps struct { - local.PluginLogDeps // inject + infra.PluginName // inject + Log logging.PluginLogger } // Init initializes variables. 
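Taken together, the new options.go files in this patch (kvdbsync, msgsync, resync, and etcd further below) replace the flavor mechanism with a constructor-plus-options pattern. The following is a minimal sketch of how a consumer wires plugins under that pattern; it assumes that etcd.Plugin satisfies keyval.KvProtoPlugin and that resync.Plugin satisfies resync.Subscriber, and in a real agent the core drives the Init/AfterInit lifecycle rather than main:

```go
package main

import (
	"log"

	"github.com/ligato/cn-infra/datasync/kvdbsync"
	"github.com/ligato/cn-infra/datasync/resync"
	"github.com/ligato/cn-infra/db/keyval/etcd"
)

func main() {
	// ETCD connector built with its default dependencies.
	etcdPlugin := etcd.NewPlugin()

	// KV datasync layered on top of the connector; UseDeps lets the
	// caller swap in concrete dependencies before initialization.
	kvSync := kvdbsync.NewPlugin(kvdbsync.UseDeps(func(deps *kvdbsync.Deps) {
		deps.KvPlugin = etcdPlugin
		deps.ResyncOrch = &resync.DefaultPlugin
	}))

	if err := etcdPlugin.Init(); err != nil {
		log.Fatal(err)
	}
	if err := kvSync.Init(); err != nil {
		log.Fatal(err)
	}
}
```

Note that kvdbsync.NewPlugin derives the datasync plugin name from the connector's String() method, so the instance above would end up named "etcd-datasync".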
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/bytes_watcher_api.go b/vendor/github.com/ligato/cn-infra/db/keyval/bytes_watcher_api.go index cbf38a8760..a9ca8e713a 100644 --- a/vendor/github.com/ligato/cn-infra/db/keyval/bytes_watcher_api.go +++ b/vendor/github.com/ligato/cn-infra/db/keyval/bytes_watcher_api.go @@ -17,7 +17,6 @@ package keyval import ( "time" - "github.com/ligato/cn-infra/core" "github.com/ligato/cn-infra/datasync" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" @@ -47,14 +46,14 @@ func ToChan(ch chan BytesWatchResp, opts ...interface{}) func(dto BytesWatchResp timeout := datasync.DefaultNotifTimeout var logger logging.Logger = logrus.DefaultLogger() - for _, opt := range opts { + /*for _, opt := range opts { switch opt.(type) { case *core.WithLoggerOpt: logger = opt.(*core.WithLoggerOpt).Logger case *core.WithTimeoutOpt: timeout = opt.(*core.WithTimeoutOpt).Timeout } - } + }*/ return func(dto BytesWatchResp) { select { diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/consul/README.md b/vendor/github.com/ligato/cn-infra/db/keyval/consul/README.md deleted file mode 100644 index 150377bd7e..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/consul/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Consul plugin - -The Consul plugin provides access to a consul key-value data store. - -## Configuration - -- Location of the Consul configuration file can be defined either by the - command line flag `consul-config` or set via the `CONSUL_CONFIG` - environment variable. - -## Status Check - -- If injected, Consul plugin will use StatusCheck plugin to periodically - issue a minimalistic GET request to check for the status of the connection. - The consul connection state affects the global status of the agent. - If agent cannot establish connection with consul, both the readiness - and the liveness probe from the [probe plugin](../../../health/probe) - will return a negative result (accessible only via REST API in such - case). - -## Reconnect resynchronization - -- If connection to the Consul is interrupted, resync can be automatically called - after re-connection. This option is disabled by default and has to be allowed - in the etcd.conf file. - - Set `resync-after-reconnect` to `true` to enable the feature. diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.conf b/vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.conf deleted file mode 100644 index 2e30b59d0f..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.conf +++ /dev/null @@ -1,6 +0,0 @@ -# Address of the Consul server -address: 0.0.0.0:8500 - -# If Consul server lost connection, the flag allows to automatically run the whole resync procedure -# for all registered plugins if it reconnects -resync-after-reconnect: true \ No newline at end of file diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.go b/vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.go deleted file mode 100644 index b3d01b543a..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.go +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package consul - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" - - "github.com/hashicorp/consul/api" -) - -var consulLogger = logrus.NewLogger("consul") - -func init() { - if os.Getenv("DEBUG_CONSUL_CLIENT") != "" { - consulLogger.SetLevel(logging.DebugLevel) - } -} - -func transformKey(key string) string { - return strings.TrimPrefix(key, "/") -} - -// Client serves as a client for Consul KV storage and implements keyval.CoreBrokerWatcher interface. -type Client struct { - client *api.Client -} - -// NewClient creates new client for Consul using given address. -func NewClient(cfg *api.Config) (store *Client, err error) { - var c *api.Client - if c, err = api.NewClient(cfg); err != nil { - return nil, fmt.Errorf("failed to create Consul client %s", err) - } - - peers, err := c.Status().Peers() - if err != nil { - return nil, err - } - consulLogger.Infof("consul peers: %v", peers) - - return &Client{ - client: c, - }, nil - -} - -// Put stores given data for the key. -func (c *Client) Put(key string, data []byte, opts ...datasync.PutOption) error { - consulLogger.Debugf("Put: %q", key) - p := &api.KVPair{Key: transformKey(key), Value: data} - _, err := c.client.KV().Put(p, nil) - if err != nil { - return err - } - - return nil -} - -// NewTxn creates new transaction. -func (c *Client) NewTxn() keyval.BytesTxn { - return &txn{ - kv: c.client.KV(), - } -} - -// GetValue returns data for the given key. -func (c *Client) GetValue(key string) (data []byte, found bool, revision int64, err error) { - consulLogger.Debugf("GetValue: %q", key) - pair, _, err := c.client.KV().Get(transformKey(key), nil) - if err != nil { - return nil, false, 0, err - } else if pair == nil { - return nil, false, 0, nil - } - - return pair.Value, true, int64(pair.ModifyIndex), nil -} - -// ListValues returns interator with key-value pairs for given key prefix. -func (c *Client) ListValues(key string) (keyval.BytesKeyValIterator, error) { - pairs, _, err := c.client.KV().List(transformKey(key), nil) - if err != nil { - return nil, err - } - - return &bytesKeyValIterator{len: len(pairs), pairs: pairs}, nil -} - -// ListKeys returns interator with keys for given key prefix. -func (c *Client) ListKeys(prefix string) (keyval.BytesKeyIterator, error) { - keys, _, err := c.client.KV().Keys(transformKey(prefix), "", nil) - if err != nil { - return nil, err - } - - return &bytesKeyIterator{len: len(keys), keys: keys}, nil -} - -// Delete deletes given key. -func (c *Client) Delete(key string, opts ...datasync.DelOption) (existed bool, err error) { - consulLogger.Debugf("Delete: %q", key) - if _, err := c.client.KV().Delete(transformKey(key), nil); err != nil { - return false, err - } - - return true, nil -} - -// Watch watches given list of key prefixes. 
-func (c *Client) Watch(resp func(keyval.BytesWatchResp), closeChan chan string, keys ...string) error { - consulLogger.Debug("Watch:", keys) - for _, k := range keys { - if err := c.watch(resp, closeChan, k); err != nil { - return err - } - } - return nil -} - -type watchResp struct { - typ datasync.PutDel - key string - value, prevValue []byte - rev int64 -} - -// GetChangeType returns "Put" for BytesWatchPutResp. -func (resp *watchResp) GetChangeType() datasync.PutDel { - return resp.typ -} - -// GetKey returns the key that the value has been inserted under. -func (resp *watchResp) GetKey() string { - return resp.key -} - -// GetValue returns the value that has been inserted. -func (resp *watchResp) GetValue() []byte { - return resp.value -} - -// GetPrevValue returns the previous value that has been inserted. -func (resp *watchResp) GetPrevValue() []byte { - return resp.prevValue -} - -// GetRevision returns the revision associated with the 'put' operation. -func (resp *watchResp) GetRevision() int64 { - return resp.rev -} - -func (c *Client) watch(resp func(watchResp keyval.BytesWatchResp), closeCh chan string, prefix string) error { - consulLogger.Debug("watch:", prefix) - - ctx, cancel := context.WithCancel(context.Background()) - - recvChan := c.watchPrefix(ctx, prefix) - - go func(regPrefix string) { - defer cancel() - for { - select { - case wr, ok := <-recvChan: - if !ok { - consulLogger.WithField("prefix", prefix). - Debug("Watch recv chan was closed") - return - } - for _, ev := range wr.Events { - key := ev.Key - if !strings.HasPrefix(key, "/") && strings.HasPrefix(regPrefix, "/") { - key = "/" + key - } - var r keyval.BytesWatchResp - if ev.Type == datasync.Put { - r = &watchResp{ - typ: datasync.Put, - key: key, - value: ev.Value, - prevValue: ev.PrevValue, - rev: ev.Revision, - } - } else { - r = &watchResp{ - typ: datasync.Delete, - key: key, - value: ev.Value, - rev: ev.Revision, - } - } - resp(r) - } - case closeVal, ok := <-closeCh: - if !ok || closeVal == regPrefix { - consulLogger.WithField("prefix", prefix). 
- Debug("Watch ended") - return - } - } - } - }(prefix) - - return nil -} - -type watchEvent struct { - Type datasync.PutDel - Key string - Value []byte - PrevValue []byte - Revision int64 -} - -type watchResponse struct { - Events []*watchEvent - Err error -} - -func (c *Client) watchPrefix(ctx context.Context, prefix string) <-chan watchResponse { - consulLogger.Debug("watchPrefix:", prefix) - - ch := make(chan watchResponse, 1) - - // Retrieve KV pairs and latest index - qOpt := &api.QueryOptions{} - oldPairs, qm, err := c.client.KV().List(prefix, qOpt.WithContext(ctx)) - if err != nil { - ch <- watchResponse{Err: err} - close(ch) - return ch - } - - oldIndex := qm.LastIndex - oldPairsMap := make(map[string]*api.KVPair) - - consulLogger.Debugf("prefix %v listing %v pairs (last index: %v)", prefix, len(oldPairs), oldIndex) - for _, pair := range oldPairs { - consulLogger.Debugf(" - key: %q create: %v modify: %v value: %v", pair.Key, pair.CreateIndex, pair.ModifyIndex, len(pair.Value)) - oldPairsMap[pair.Key] = pair - } - - go func() { - for { - // Wait for an update to occur since the last index - var newPairs api.KVPairs - qOpt := &api.QueryOptions{ - WaitIndex: oldIndex, - } - newPairs, qm, err = c.client.KV().List(prefix, qOpt.WithContext(ctx)) - if err != nil { - ch <- watchResponse{Err: err} - close(ch) - return - } - newIndex := qm.LastIndex - - // If the index is same as old one, request probably timed out, so we start again - if oldIndex == newIndex { - consulLogger.Debug("index unchanged, next round") - continue - } - - consulLogger.Debugf("prefix %q: listing %v new pairs, new index: %v (old index: %v)", prefix, len(newPairs), newIndex, oldIndex) - for _, pair := range newPairs { - consulLogger.Debugf(" + key: %q create: %v modify: %v value: %v", pair.Key, pair.CreateIndex, pair.ModifyIndex, len(pair.Value)) - } - - var evs []*watchEvent - - // Search for all created and modified KV - for _, pair := range newPairs { - if pair.ModifyIndex > oldIndex { - var prevVal []byte - if oldPair, ok := oldPairsMap[pair.Key]; ok { - prevVal = oldPair.Value - } - consulLogger.Debugf(" * modified key: %v prevValue: %v prevModify: %v", pair.Key, len(pair.Value), len(prevVal)) - evs = append(evs, &watchEvent{ - Type: datasync.Put, - Key: pair.Key, - Value: pair.Value, - PrevValue: prevVal, - Revision: int64(pair.ModifyIndex), - }) - } - delete(oldPairsMap, pair.Key) - } - // Search for all deleted KV - for _, pair := range oldPairsMap { - evs = append(evs, &watchEvent{ - Type: datasync.Delete, - Key: pair.Key, - PrevValue: pair.Value, - Revision: int64(pair.ModifyIndex), - }) - } - - // Prepare latest KV pairs and last index for next round - oldIndex = newIndex - oldPairsMap = make(map[string]*api.KVPair) - for _, pair := range newPairs { - oldPairsMap[pair.Key] = pair - } - - ch <- watchResponse{Events: evs} - } - }() - return ch -} - -// Close returns nil. -func (c *Client) Close() error { - return nil -} - -// NewBroker creates a new instance of a proxy that provides -// access to etcd. The proxy will reuse the connection from Client. -// will be prepended to the key argument in all calls from the created -// BrokerWatcher. To avoid using a prefix, pass keyval. Root constant as -// an argument. -func (c *Client) NewBroker(prefix string) keyval.BytesBroker { - return &BrokerWatcher{ - Client: c, - prefix: prefix, - } -} - -// NewWatcher creates a new instance of a proxy that provides -// access to etcd. The proxy will reuse the connection from Client. 
-// will be prepended to the key argument in all calls on created -// BrokerWatcher. To avoid using a prefix, pass keyval. Root constant as -// an argument. -func (c *Client) NewWatcher(prefix string) keyval.BytesWatcher { - return &BrokerWatcher{ - Client: c, - prefix: prefix, - } -} - -// BrokerWatcher uses Client to access the datastore. -// The connection can be shared among multiple BrokerWatcher. -// In case of accessing a particular subtree in Consul only, -// BrokerWatcher allows defining a keyPrefix that is prepended -// to all keys in its methods in order to shorten keys used in arguments. -type BrokerWatcher struct { - *Client - prefix string -} - -func (pdb *BrokerWatcher) prefixKey(key string) string { - return filepath.Join(pdb.prefix, key) -} - -// Put calls 'Put' function of the underlying BytesConnectionEtcd. -// KeyPrefix defined in constructor is prepended to the key argument. -func (pdb *BrokerWatcher) Put(key string, data []byte, opts ...datasync.PutOption) error { - return pdb.Client.Put(pdb.prefixKey(key), data, opts...) -} - -// NewTxn creates a new transaction. -// KeyPrefix defined in constructor will be prepended to all key arguments -// in the transaction. -func (pdb *BrokerWatcher) NewTxn() keyval.BytesTxn { - return pdb.Client.NewTxn() -} - -// GetValue calls 'GetValue' function of the underlying BytesConnectionEtcd. -// KeyPrefix defined in constructor is prepended to the key argument. -func (pdb *BrokerWatcher) GetValue(key string) (data []byte, found bool, revision int64, err error) { - return pdb.Client.GetValue(pdb.prefixKey(key)) -} - -// ListValues calls 'ListValues' function of the underlying BytesConnectionEtcd. -// KeyPrefix defined in constructor is prepended to the key argument. -// The prefix is removed from the keys of the returned values. -func (pdb *BrokerWatcher) ListValues(key string) (keyval.BytesKeyValIterator, error) { - pairs, _, err := pdb.client.KV().List(pdb.prefixKey(key), nil) - if err != nil { - return nil, err - } - - return &bytesKeyValIterator{len: len(pairs), pairs: pairs, prefix: pdb.prefix}, nil -} - -// ListKeys calls 'ListKeys' function of the underlying BytesConnectionEtcd. -// KeyPrefix defined in constructor is prepended to the argument. -func (pdb *BrokerWatcher) ListKeys(prefix string) (keyval.BytesKeyIterator, error) { - keys, qm, err := pdb.client.KV().Keys(pdb.prefixKey(prefix), "", nil) - if err != nil { - return nil, err - } - - return &bytesKeyIterator{len: len(keys), keys: keys, prefix: pdb.prefix, lastIndex: qm.LastIndex}, nil -} - -// Delete calls 'Delete' function of the underlying BytesConnectionEtcd. -// KeyPrefix defined in constructor is prepended to the key argument. -func (pdb *BrokerWatcher) Delete(key string, opts ...datasync.DelOption) (existed bool, err error) { - return pdb.Client.Delete(pdb.prefixKey(key), opts...) -} - -// Watch starts subscription for changes associated with the selected . -// KeyPrefix defined in constructor is prepended to all in the argument -// list. The prefix is removed from the keys returned in watch events. -// Watch events will be delivered to callback. -func (pdb *BrokerWatcher) Watch(resp func(keyval.BytesWatchResp), closeChan chan string, keys ...string) error { - var prefixedKeys []string - for _, key := range keys { - prefixedKeys = append(prefixedKeys, pdb.prefixKey(key)) - } - return pdb.Client.Watch(func(origResp keyval.BytesWatchResp) { - r := origResp.(*watchResp) - r.key = strings.TrimPrefix(r.key, pdb.prefix) - resp(r) - }, closeChan, prefixedKeys...) 
-} - -// bytesKeyIterator is an iterator returned by ListKeys call. -type bytesKeyIterator struct { - index int - len int - keys []string - prefix string - lastIndex uint64 -} - -// GetNext returns the following key (+ revision) from the result set. -// When there are no more keys to get, is returned as *true* -// and and are default values. -func (it *bytesKeyIterator) GetNext() (key string, rev int64, stop bool) { - if it.index >= it.len { - return "", 0, true - } - - key = string(it.keys[it.index]) - if !strings.HasPrefix(key, "/") && strings.HasPrefix(it.prefix, "/") { - key = "/" + key - } - if it.prefix != "" { - key = strings.TrimPrefix(key, it.prefix) - } - rev = int64(it.lastIndex) - it.index++ - - return key, rev, false -} - -// Close does nothing since db cursors are not needed. -// The method is required by the code since it implements Iterator API. -func (it *bytesKeyIterator) Close() error { - return nil -} - -// bytesKeyValIterator is an iterator returned by ListValues call. -type bytesKeyValIterator struct { - index int - len int - pairs api.KVPairs - prefix string -} - -// GetNext returns the following item from the result set. -// When there are no more items to get, is returned as *true* and -// is simply *nil*. -func (it *bytesKeyValIterator) GetNext() (val keyval.BytesKeyVal, stop bool) { - if it.index >= it.len { - return nil, true - } - - key := string(it.pairs[it.index].Key) - if !strings.HasPrefix(key, "/") && strings.HasPrefix(it.prefix, "/") { - key = "/" + key - } - if it.prefix != "" { - key = strings.TrimPrefix(key, it.prefix) - } - data := it.pairs[it.index].Value - rev := int64(it.pairs[it.index].ModifyIndex) - - var prevValue []byte - if len(it.pairs) > 0 && it.index > 0 { - prevValue = it.pairs[it.index-1].Value - } - - it.index++ - - return &bytesKeyVal{key, data, prevValue, rev}, false -} - -// Close does nothing since db cursors are not needed. -// The method is required by the code since it implements Iterator API. -func (it *bytesKeyValIterator) Close() error { - return nil -} - -// bytesKeyVal represents a single key-value pair. -type bytesKeyVal struct { - key string - value []byte - prevValue []byte - revision int64 -} - -// Close does nothing since db cursors are not needed. -// The method is required by the code since it implements Iterator API. -func (kv *bytesKeyVal) Close() error { - return nil -} - -// GetValue returns the value of the pair. -func (kv *bytesKeyVal) GetValue() []byte { - return kv.value -} - -// GetPrevValue returns the previous value of the pair. -func (kv *bytesKeyVal) GetPrevValue() []byte { - return kv.prevValue -} - -// GetKey returns the key of the pair. -func (kv *bytesKeyVal) GetKey() string { - return kv.key -} - -// GetRevision returns the revision associated with the pair. -func (kv *bytesKeyVal) GetRevision() int64 { - return kv.revision -} diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/consul/plugin.go b/vendor/github.com/ligato/cn-infra/db/keyval/consul/plugin.go deleted file mode 100644 index b3596292b3..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/consul/plugin.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package consul - -import ( - "github.com/hashicorp/consul/api" - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/datasync/resync" - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/db/keyval/kvproto" - "github.com/ligato/cn-infra/flavors/local" - "github.com/ligato/cn-infra/health/statuscheck" -) - -const ( - // healthCheckProbeKey is a key used to probe connection state - healthCheckProbeKey = "/probe-consul-connection" -) - -// Config represents configuration for Consul plugin. -type Config struct { - Address string `json:"address"` - ReconnectResync bool `json:"resync-after-reconnect"` -} - -// Plugin implements Consul as plugin. -type Plugin struct { - Deps - - // Plugin is disabled if there is no config file available - disabled bool - // Consul client encapsulation - client *Client - // Read/Write proto modelled data - protoWrapper *kvproto.ProtoWrapper - - reconnectResync bool - lastConnErr error -} - -// Deps lists dependencies of the Consul plugin. -// If injected, Consul plugin will use StatusCheck to signal the connection status. -type Deps struct { - local.PluginInfraDeps - Resync *resync.Plugin -} - -// Disabled returns *true* if the plugin is not in use due to missing configuration. -func (plugin *Plugin) Disabled() bool { - return plugin.disabled -} - -func (plugin *Plugin) getConfig() (*Config, error) { - var cfg Config - found, err := plugin.PluginConfig.GetValue(&cfg) - if err != nil { - return nil, err - } - if !found { - plugin.Log.Info("Consul config not found, skip loading this plugin") - plugin.disabled = true - return nil, nil - } - return &cfg, nil -} - -// ConfigToClient transforms Config into api.Config, -// which is ready for use with underlying consul package. -func ConfigToClient(cfg *Config) (*api.Config, error) { - clientCfg := api.DefaultConfig() - if cfg.Address != "" { - clientCfg.Address = cfg.Address - } - return clientCfg, nil -} - -// Init initializes Consul plugin. -func (plugin *Plugin) Init() (err error) { - cfg, err := plugin.getConfig() - if err != nil || plugin.disabled { - return err - } - clientCfg, err := ConfigToClient(cfg) - if err != nil { - return err - } - plugin.client, err = NewClient(clientCfg) - if err != nil { - plugin.Log.Errorf("Err: %v", err) - return err - } - plugin.reconnectResync = cfg.ReconnectResync - plugin.protoWrapper = kvproto.NewProtoWrapperWithSerializer(plugin.client, &keyval.SerializerJSON{}) - - // Register for providing status reports (polling mode). 
- if plugin.StatusCheck != nil { - plugin.StatusCheck.Register(core.PluginName(plugin.PluginName), func() (statuscheck.PluginState, error) { - _, _, _, err := plugin.client.GetValue(healthCheckProbeKey) - if err == nil { - if plugin.reconnectResync && plugin.lastConnErr != nil { - plugin.Log.Info("Starting resync after Consul reconnect") - if plugin.Resync != nil { - plugin.Resync.DoResync() - plugin.lastConnErr = nil - } else { - plugin.Log.Warn("Expected resync after Consul reconnect could not start beacuse of missing Resync plugin") - } - } - return statuscheck.OK, nil - } - plugin.lastConnErr = err - return statuscheck.Error, err - }) - } else { - plugin.Log.Warnf("Unable to start status check for consul") - } - - return nil -} - -// Close closes Consul plugin. -func (plugin *Plugin) Close() error { - return nil -} - -// NewBroker creates new instance of prefixed broker that provides API with arguments of type proto.Message. -func (plugin *Plugin) NewBroker(keyPrefix string) keyval.ProtoBroker { - return plugin.protoWrapper.NewBroker(keyPrefix) -} - -// NewWatcher creates new instance of prefixed broker that provides API with arguments of type proto.Message. -func (plugin *Plugin) NewWatcher(keyPrefix string) keyval.ProtoWatcher { - return plugin.protoWrapper.NewWatcher(keyPrefix) -} diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/consul/txn.go b/vendor/github.com/ligato/cn-infra/db/keyval/consul/txn.go deleted file mode 100644 index 36839a81d9..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/consul/txn.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package consul - -import ( - "fmt" - - "github.com/hashicorp/consul/api" - - "github.com/ligato/cn-infra/db/keyval" -) - -// Txn allows grouping operations into the transaction. Transaction executes -// multiple operations in a more efficient way in contrast to executing -// them one by one. -type txn struct { - ops api.KVTxnOps - kv *api.KV -} - -// Put adds a new 'put' operation to a previously created transaction. -// If the does not exist in the data store, a new key-value item -// will be added to the data store. If exists in the data store, -// the existing value will be overwritten with the from this -// operation. -func (tx *txn) Put(key string, value []byte) keyval.BytesTxn { - tx.ops = append(tx.ops, &api.KVTxnOp{ - Verb: api.KVSet, - Key: key, - Value: value, - }) - return tx -} - -// Delete adds a new 'delete' operation to a previously created -// transaction. If exists in the data store, the associated value -// will be removed. -func (tx *txn) Delete(key string) keyval.BytesTxn { - tx.ops = append(tx.ops, &api.KVTxnOp{ - Verb: api.KVDelete, - Key: key, - }) - return tx -} - -// Commit commits all operations in a transaction to the data store. -// Commit is atomic - either all operations in the transaction are -// committed to the data store, or none of them. 
-func (tx *txn) Commit() error {
-	ok, resp, _, err := tx.kv.Txn(tx.ops, nil)
-	if err != nil {
-		return err
-	} else if !ok {
-		return fmt.Errorf("transaction failed: %v", resp)
-	}
-	return nil
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/bytes_broker_impl.go b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/bytes_broker_impl.go
index fae53d5325..0d49e9e5e6 100644
--- a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/bytes_broker_impl.go
+++ b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/bytes_broker_impl.go
@@ -77,7 +77,7 @@ func NewEtcdConnectionWithBytes(config ClientConfig, log logging.Logger) (*Bytes
 	start := time.Now()
 	etcdClient, err := clientv3.New(*config.Config)
 	if err != nil {
-		log.Errorf("Failed to connect to Etcd etcd(s) %v, Error: '%s'", config.Endpoints, err)
+		log.Debugf("Unable to connect to ETCD %v, Error: '%s'", config.Endpoints, err)
 		return nil, err
 	}
 	etcdConnectTime := time.Since(start)
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/config.go b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/config.go
index 9b8a71a9bf..dcf77d80ac 100644
--- a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/config.go
+++ b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/config.go
@@ -40,6 +40,8 @@ type Config struct {
 	CAfile            string        `json:"ca-file"`
 	AutoCompact       time.Duration `json:"auto-compact"`
 	ReconnectResync   bool          `json:"resync-after-reconnect"`
+	AllowDelayedStart bool          `json:"allow-delayed-start"`
+	ReconnectInterval time.Duration `json:"reconnect-interval"`
 }
 
 // ClientConfig extends clientv3.Config with configuration options introduced
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/etcd.conf b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/etcd.conf
index d22dc26ac5..5d92889d7a 100644
--- a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/etcd.conf
+++ b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/etcd.conf
@@ -3,19 +3,19 @@ endpoints:
   - "172.17.0.1:2379"
 
 # Connection fails if it is not established till timeout
-dial-timeout: 500
+dial-timeout: 100000000
 
-# Operation timeout value in millisecond
-operation-timeout: 500
+# Operation timeout value in nanoseconds
+operation-timeout: 300000000
 
 # Insecure transport omits TLS usage
-insecure-transport: true
+insecure-transport: false
 
 # Controls whether a client verifies the server's certificate chain and host name.
 # If InsecureSkipVerify is true, TLS accepts any certificate presented by the server
 # and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle
 # attacks. This should be used only for testing.
-insecure-skip-tls-verify
+insecure-skip-tls-verify: false
 
 # TLS certification file
 cert-file:
@@ -26,9 +26,16 @@ key-file:
 
 # CA file used to create a set of x509 certificates
 ca-file:
 
-# Enable or disabel ETCD auto compaction
-auto-compact: true
+# Interval between ETCD auto compaction cycles. 0 means disabled.
+auto-compact: 0
 
 # If ETCD server lost connection, the flag allows to automatically run the whole resync procedure
 # for all registered plugins if it reconnects
-resync-after-reconnect: false
\ No newline at end of file
+resync-after-reconnect: false
+
+# Allows the plugin to start without a connected ETCD database. It will keep trying to connect and,
+# once successful, an overall resync is called
+allow-delayed-start: false
+
+# Interval between ETCD reconnect attempts in ns. Default value is 2 seconds.
+# It has no effect if `allow-delayed-start` is turned off
+reconnect-interval: 2000000000
\ No newline at end of file
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/options.go b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/options.go
new file mode 100644
index 0000000000..6dd18675e5
--- /dev/null
+++ b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/options.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcd
+
+import (
+	"github.com/ligato/cn-infra/config"
+	"github.com/ligato/cn-infra/health/statuscheck"
+	"github.com/ligato/cn-infra/logging"
+)
+
+// DefaultPlugin is a default instance of Plugin.
+var DefaultPlugin = *NewPlugin()
+
+// NewPlugin creates a new Plugin with the provided Options.
+func NewPlugin(opts ...Option) *Plugin {
+	p := &Plugin{}
+
+	p.PluginName = "etcd"
+	p.StatusCheck = &statuscheck.DefaultPlugin
+
+	for _, o := range opts {
+		o(p)
+	}
+
+	if p.Deps.Log == nil {
+		p.Deps.Log = logging.ForPlugin(p.String())
+	}
+	if p.Deps.PluginConfig == nil {
+		p.Deps.PluginConfig = config.ForPlugin(p.String())
+	}
+
+	return p
+}
+
+// Option is a function that can be used in NewPlugin to customize Plugin.
+type Option func(*Plugin)
+
+// UseDeps returns Option that can inject custom dependencies.
+func UseDeps(cb func(*Deps)) Option {
+	return func(p *Plugin) {
+		cb(&p.Deps)
+	}
+}
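A minimal usage sketch of the constructor above (an editorial illustration, not part of this patch); the injected logger name "custom-etcd" is an assumption:

	p := NewPlugin(UseDeps(func(deps *Deps) {
		deps.Log = logging.ForPlugin("custom-etcd")
	}))

UseDeps runs after the defaults are set, so only the fields the callback touches are overridden.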
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/plugin_impl_etcd.go b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/plugin_impl_etcd.go
index b86171d108..78cece0ce2 100644
--- a/vendor/github.com/ligato/cn-infra/db/keyval/etcd/plugin_impl_etcd.go
+++ b/vendor/github.com/ligato/cn-infra/db/keyval/etcd/plugin_impl_etcd.go
@@ -16,42 +16,56 @@ package etcd
 
 import (
 	"fmt"
+	"sync"
 	"time"
 
-	"github.com/ligato/cn-infra/core"
 	"github.com/ligato/cn-infra/datasync/resync"
 	"github.com/ligato/cn-infra/db/keyval"
 	"github.com/ligato/cn-infra/db/keyval/kvproto"
-	"github.com/ligato/cn-infra/flavors/local"
 	"github.com/ligato/cn-infra/health/statuscheck"
+	"github.com/ligato/cn-infra/infra"
 	"github.com/ligato/cn-infra/utils/safeclose"
 )
 
const (
 	// healthCheckProbeKey is a key used to probe Etcd state
 	healthCheckProbeKey = "/probe-etcd-connection"
+	// ETCD reconnect interval
+	defaultReconnectInterval = 2 * time.Second
)
 
// Plugin implements etcd plugin.
type Plugin struct {
 	Deps
+	sync.Mutex
+
 	// Plugin is disabled if there is no config file available
 	disabled bool
+	// Set if connected to ETCD db
+	connected bool
 	// ETCD connection encapsulation
 	connection *BytesConnectionEtcd
 	// Read/Write proto modelled data
 	protoWrapper *kvproto.ProtoWrapper
+	// plugin config
+	config *Config
+
+	// List of callback functions, used in case ETCD is not connected immediately. All plugins
+	// using ETCD as a dependency add their own function if the cluster is not reachable; after
+	// connecting, all the functions are executed.
+	onConnection []func() error
+
 	autoCompactDone chan struct{}
-	reconnectResync bool
 	lastConnErr     error
}
 
// Deps lists dependencies of the etcd plugin.
// If injected, etcd plugin will use StatusCheck to signal the connection status.
type Deps struct {
-	local.PluginInfraDeps
-	Resync *resync.Plugin
+	infra.Deps
+	StatusCheck statuscheck.PluginStatusWriter // inject
+	Resync      *resync.Plugin
}
 
// Init retrieves ETCD configuration and establishes a new connection
@@ -65,52 +79,38 @@ type Deps struct {
// the connection cannot be established.
func (plugin *Plugin) Init() (err error) {
 	// Read ETCD configuration file. Returns error if does not exists.
-	etcdCfg, err := plugin.getEtcdConfig()
+	plugin.config, err = plugin.getEtcdConfig()
 	if err != nil || plugin.disabled {
 		return err
 	}
 	// Transforms .yaml config to ETCD client configuration
-	etcdClientCfg, err := ConfigToClient(&etcdCfg)
+	etcdClientCfg, err := ConfigToClient(plugin.config)
 	if err != nil {
 		return err
 	}
 	// Uses config file to establish connection with the database
 	plugin.connection, err = NewEtcdConnectionWithBytes(*etcdClientCfg, plugin.Log)
-	if err != nil {
-		plugin.Log.Errorf("Err: %v", err)
-		return err
-	}
-	plugin.reconnectResync = etcdCfg.ReconnectResync
-	if etcdCfg.AutoCompact > 0 {
-		if etcdCfg.AutoCompact < time.Duration(time.Minute*60) {
-			plugin.Log.Warnf("Auto compact option for ETCD is set to less than 60 minutes!")
-		}
-		plugin.startPeriodicAutoCompact(etcdCfg.AutoCompact)
-	}
-	plugin.protoWrapper = kvproto.NewProtoWrapperWithSerializer(plugin.connection, &keyval.SerializerJSON{})
-
 	// Register for providing status reports (polling mode).
 	if plugin.StatusCheck != nil {
-		plugin.StatusCheck.Register(core.PluginName(plugin.PluginName), func() (statuscheck.PluginState, error) {
-			_, _, _, err := plugin.connection.GetValue(healthCheckProbeKey)
-			if err == nil {
-				if plugin.reconnectResync && plugin.lastConnErr != nil {
-					plugin.Log.Info("Starting resync after ETCD reconnect")
-					if plugin.Resync != nil {
-						plugin.Resync.DoResync()
-						plugin.lastConnErr = nil
-					} else {
-						plugin.Log.Warn("Expected resync after ETCD reconnect could not start beacuse of missing Resync plugin")
-					}
-				}
-				return statuscheck.OK, nil
-			}
-			plugin.lastConnErr = err
-			return statuscheck.Error, err
-		})
+		plugin.StatusCheck.Register(plugin.PluginName, plugin.statusCheckProbe)
 	} else {
 		plugin.Log.Warnf("Unable to start status check for etcd")
 	}
 
+	if err != nil && plugin.config.AllowDelayedStart {
+		// If the connection cannot be established during init, keep trying in another goroutine (if allowed) and
+		// end the init
+		go plugin.etcdReconnectionLoop(etcdClientCfg)
+		return nil
+	} else if err != nil {
+		// If delayed start is not allowed, return error
+		return fmt.Errorf("error connecting to ETCD: %v", err)
+	}
+
+	// If successful, configure and return
+	plugin.configureConnection()
+
+	// Mark plugin as connected at this point
+	plugin.connected = true
 	return nil
}
 
@@ -137,6 +137,25 @@ func (plugin *Plugin) Disabled() (disabled bool) {
 	return plugin.disabled
}
 
+// OnConnect executes the callback if the plugin is connected, or gathers functions from all plugins with ETCD as a dependency
+func (plugin *Plugin) OnConnect(callback func() error) {
+	plugin.Lock()
+	defer plugin.Unlock()
+
+	if plugin.connected {
+		if err := callback(); err != nil {
+			plugin.Log.Error(err)
+		}
+	} else {
+		plugin.onConnection = append(plugin.onConnection, callback)
+	}
+}
+
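// NOTE (editorial sketch, not part of this patch): a plugin that depends on etcd
// would typically register its post-connection work through OnConnect above. The
// registerWatchers callback below is an assumption used purely for illustration:
//
//	func initDependentPlugin(etcdPlugin *Plugin, registerWatchers func() error) {
//		// Runs immediately if etcd is already connected; otherwise the callback is
//		// queued in onConnection and executed once the reconnection loop succeeds.
//		etcdPlugin.OnConnect(registerWatchers)
//	}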
+// GetPluginName returns the name of the plugin
+func (plugin *Plugin) GetPluginName() infra.PluginName {
+	return plugin.PluginName
+}
+
 // PutIfNotExists puts given key-value pair into etcd if there is no value set for the key. If the put was successful
 // succeeded is true. If the key already exists succeeded is false and the value for the key is untouched.
 func (plugin *Plugin) PutIfNotExists(key string, value []byte) (succeeded bool, err error) {
@@ -154,18 +173,97 @@ func (plugin *Plugin) Compact(rev ...int64) (toRev int64, err error) {
 	return 0, fmt.Errorf("connection is not established")
 }
 
-func (plugin *Plugin) getEtcdConfig() (Config, error) {
+// etcdReconnectionLoop runs a loop which attempts to connect to ETCD. If successful, the registered callbacks
+// are executed and a resync is started once datasync confirms successful registration
+func (plugin *Plugin) etcdReconnectionLoop(clientCfg *ClientConfig) {
+	var err error
+	// Set reconnect interval
+	interval := plugin.config.ReconnectInterval
+	if interval == 0 {
+		interval = defaultReconnectInterval
+	}
+	plugin.Log.Infof("ETCD server %s not reachable in init phase. Agent will continue to try to connect every %v",
+		plugin.config.Endpoints, interval)
+	for {
+		time.Sleep(interval)
+
+		plugin.Log.Infof("Connecting to ETCD %v ...", plugin.config.Endpoints)
+		plugin.connection, err = NewEtcdConnectionWithBytes(*clientCfg, plugin.Log)
+		if err != nil {
+			continue
+		}
+		plugin.setupPostInitConnection()
+		return
+	}
+}
+
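// NOTE (editorial sketch, not part of this patch): the loop above is driven by the
// new Config fields added in config.go. A configuration enabling delayed start
// could, for instance, look like this (a zero ReconnectInterval falls back to
// defaultReconnectInterval, i.e. 2 seconds):
//
//	cfg := &Config{
//		AllowDelayedStart: true,
//		ReconnectInterval: 5 * time.Second,
//	}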
+func (plugin *Plugin) setupPostInitConnection() {
+	plugin.Log.Infof("ETCD server %s connected", plugin.config.Endpoints)
+
+	plugin.Lock()
+	defer plugin.Unlock()
+
+	// Configure connection and set as connected
+	plugin.configureConnection()
+	plugin.connected = true
+	// Execute callback functions (if any)
+	for _, callback := range plugin.onConnection {
+		if err := callback(); err != nil {
+			plugin.Log.Error(err)
+		}
+	}
+	// Call resync if any callback was executed. Otherwise there is nothing to resync
+	if plugin.Resync != nil && len(plugin.onConnection) > 0 {
+		plugin.Resync.DoResync()
+	}
+	plugin.Log.Debugf("Etcd reconnection loop ended")
+}
+
+// If ETCD is connected, completes all remaining setup procedures
+func (plugin *Plugin) configureConnection() {
+	if plugin.config.AutoCompact > 0 {
+		if plugin.config.AutoCompact < time.Duration(time.Minute*60) {
+			plugin.Log.Warnf("Auto compact option for ETCD is set to less than 60 minutes!")
+		}
+		plugin.startPeriodicAutoCompact(plugin.config.AutoCompact)
+	}
+	plugin.protoWrapper = kvproto.NewProtoWrapperWithSerializer(plugin.connection, &keyval.SerializerJSON{})
+}
+
+// ETCD status check probe function
+func (plugin *Plugin) statusCheckProbe() (statuscheck.PluginState, error) {
+	if plugin.connection == nil {
+		plugin.connected = false
+		return statuscheck.Error, fmt.Errorf("no ETCD connection available")
+	}
+	if _, _, _, err := plugin.connection.GetValue(healthCheckProbeKey); err != nil {
+		plugin.lastConnErr = err
+		plugin.connected = false
+		return statuscheck.Error, err
+	}
+	if plugin.config.ReconnectResync && plugin.lastConnErr != nil {
+		if plugin.Resync != nil {
+			plugin.Resync.DoResync()
+			plugin.lastConnErr = nil
+		} else {
+			plugin.Log.Warn("Expected resync after ETCD reconnect could not start because of missing Resync plugin")
+		}
+	}
+	plugin.connected = true
+	return statuscheck.OK, nil
+}
+
+func (plugin *Plugin) getEtcdConfig() (*Config, error) {
 	var etcdCfg Config
 	found, err := plugin.PluginConfig.GetValue(&etcdCfg)
 	if err != nil {
-		return etcdCfg, err
+		return nil, err
 	}
 	if !found {
 		plugin.Log.Info("ETCD config not found, skip loading this plugin")
 		plugin.disabled = true
-		return etcdCfg, nil
 	}
-	return etcdCfg, nil
+	return &etcdCfg, nil
 }
 
 func (plugin *Plugin) startPeriodicAutoCompact(period time.Duration) {
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/plugin_api_keyval.go b/vendor/github.com/ligato/cn-infra/db/keyval/plugin_api_keyval.go
index b871f11f67..4911b452d1 100644
--- a/vendor/github.com/ligato/cn-infra/db/keyval/plugin_api_keyval.go
+++ b/vendor/github.com/ligato/cn-infra/db/keyval/plugin_api_keyval.go
@@ -32,6 +32,12 @@
 	// Disabled returns true if there was no configuration and therefore agent
 	// started without connectivity to a particular data store.
 	Disabled() bool
+	// OnConnect executes a datasync callback if the KV plugin is connected. If not, it gathers
+	// these functions from all plugins using the specific KV plugin as a dependency and,
+	// if delayed start is allowed, executes the callbacks after a successful connection.
+	OnConnect(func() error)
+	// String returns the key-value store name.
+ String() string } // KvBytesPlugin provides unifying interface for different key-value datastore diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/proto_watcher_api.go b/vendor/github.com/ligato/cn-infra/db/keyval/proto_watcher_api.go index 6b8abc4181..b04b74eaa5 100644 --- a/vendor/github.com/ligato/cn-infra/db/keyval/proto_watcher_api.go +++ b/vendor/github.com/ligato/cn-infra/db/keyval/proto_watcher_api.go @@ -17,7 +17,6 @@ package keyval import ( "time" - "github.com/ligato/cn-infra/core" "github.com/ligato/cn-infra/datasync" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" @@ -48,14 +47,14 @@ func ToChanProto(ch chan ProtoWatchResp, opts ...interface{}) func(dto ProtoWatc timeout := datasync.DefaultNotifTimeout var logger logging.Logger = logrus.DefaultLogger() - for _, opt := range opts { + /*for _, opt := range opts { switch opt.(type) { case *core.WithLoggerOpt: logger = opt.(*core.WithLoggerOpt).Logger case *core.WithTimeoutOpt: timeout = opt.(*core.WithTimeoutOpt).Timeout } - } + }*/ return func(dto ProtoWatchResp) { select { diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/redis/README.md b/vendor/github.com/ligato/cn-infra/db/keyval/redis/README.md deleted file mode 100644 index 2cdb0ef8e3..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/redis/README.md +++ /dev/null @@ -1,151 +0,0 @@ -Redis is the implementation of the key-value Data Broker client -API for the Redis key-value data store. -See [cn-infra/db/keyval](../../../db/keyval) for the definition -of the key-value Data Broker client API. - -The entity BytesConnectionRedis provides access to CRUD as well as event -subscription API's. -``` - +-----+ (Broker) +------------------------+ --> CRUD +-------+ --> - | app | | BytesConnectionRedis | | Redis | - +-----+ <-- (KeyValProtoWatcher) +------------------------+ <-- events +-------+ -``` - -## How to use Redis -The code snippets below provide examples to help you get started. -For simplicity, error handling is omitted. - -#### Need to import following dependencies -``` - import "github.com/ligato/cn-infra/db/keyval/kvproto" - import "github.com/ligato/cn-infra/db/keyval/redis" - import "github.com/ligato/cn-infra/utils/config" - import "github.com/ligato/cn-infra/logging/logrus" -``` -#### Define client configuration based on your Redis installation. -- Single Node -var cfg redis.NodeConfig -- Sentinel Enabled Cluster -var cfg redis.SentinelConfig -- Redis Cluster -var cfg redis.ClusterConfig -- See sample YAML configurations [(*.yaml files)](../../../examples/redis-lib) - -You can initialize any of the above configuration instances in memory, -or load the settings from file using -``` - err = config.ParseConfigFromYamlFile(configFile, &cfg) -``` -You can also load any of the three configuration files using -``` - var cfg interface{} - cfg, err := redis.LoadConfig(configFile) -``` -#### Create connection from configuration -``` - client, err := redis.CreateClient(cfg) - db, err := redis.NewBytesConnection(client, logrus.DefaultLogger()) -``` -#### Create Brokers / Watchers from connection -``` - //create broker/watcher that share the same connection pools. - bytesBroker := db.NewBroker("some-prefix") - bytesWatcher := db.NewWatcher("some-prefix") - - // create broker/watcher that share the same connection pools, - // capable of processing protocol-buffer generated data. 
- wrapper := kvproto.NewProtoWrapper(db) - protoBroker := wrapper.NewBroker("some-prefix") - protoWatcher := wrapper.NewWatcher("some-prefix") -``` -#### Perform CRUD operations -``` - // put - err = db.Put("some-key", []byte("some-value")) - err = db.Put("some-temp-key", []byte("valid for 20 seconds"), - datasync.WithTTL(20*time.Second)) - - // get - value, found, revision, err := db.GetValue("some-key") - if found { - ... - } - - // Note: flight.Info implements proto.Message. - f := flight.Info{ - Airline: "UA", - Number: 1573, - Priority: 1, - } - err = protoBroker.Put("some-key-prefix", &f) - f2 := flight.Info{} - found, revision, err = protoBroker.GetValue("some-key-prefix", &f2) - - // list - keyPrefix := "some" - kv, err := db.ListValues(keyPrefix) - for { - kv, done := kv.GetNext() - if done { - break - } - key := kv.GetKey() - value := kv.GetValue() - } - - // delete - found, err := db.Delete("some-key") - // or, delete all keys matching the prefix "some-key". - found, err := db.Delete("some-key", datasync.WithPrefix()) - - // transaction - var txn keyval.BytesTxn = db.NewTxn() - txn.Put("key101", []byte("val 101")).Put("key102", []byte("val 102")) - txn.Put("key103", []byte("val 103")).Put("key104", []byte("val 104")) - err := txn.Commit() -``` -#### Subscribe to key space events -``` - watchChan := make(chan keyval.BytesWatchResp, 10) - err = db.Watch(watchChan, "some-key") - for { - select { - case r := <-watchChan: - switch r.GetChangeType() { - case datasync.Put: - log.Infof("KeyValProtoWatcher received %v: %s=%s", r.GetChangeType(), - r.GetKey(), string(r.GetValue())) - case datasync.Delete: - ... - } - ... - } - } -``` - NOTE: You must configure Redis for it to publish key space events. -``` - config SET notify-keyspace-events KA -``` -See [EVENT NOTIFICATION](https://raw.githubusercontent.com/antirez/redis/3.2/redis.conf) -for more details. - -You can find detailed examples in -- [simple](../../../examples/redis-lib/simple) -- [airport](../../../examples/redis-lib/airport) - -#### Resiliency -Connection/read/write time-outs, failover, reconnection and recovery -are validated by running the airport example against a Redis Sentinel -Cluster. Redis nodes are paused selectively to simulate server down: -``` -$ docker-compose ps -``` -|Name |Command |State|Ports| -|-------|---------|-----|-----| -|dockerredissentinel_master_1 | docker-entrypoint.sh redis ... | Paused | 6379/tcp | -|dockerredissentinel_slave_1 | docker-entrypoint.sh redis ... | Up | 6379/tcp | -|dockerredissentinel_slave_2 | docker-entrypoint.sh redis ... | Up | 6379/tcp | -|dockerredissentinel_sentinel_1 | sentinel-entrypoint.sh | Up | 26379/tcp, 6379/tcp | -|dockerredissentinel_sentinel_2 | sentinel-entrypoint.sh | Up | 26379/tcp, 6379/tcp | -|dockerredissentinel_sentinel_3 | sentinel-entrypoint.sh | Up | 26379/tcp, 6379/tcp | - diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_broker_impl.go b/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_broker_impl.go deleted file mode 100755 index ef5c79c861..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_broker_impl.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package redis - -import ( - "fmt" - "strings" - "time" - - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/utils/safeclose" -) - -// BytesConnectionRedis allows to store, read and watch values from Redis. -type BytesConnectionRedis struct { - logging.Logger - client Client - - // closeCh will be closed when this connection is closed, i.e. by the Close() method. - // It is used to give go routines a signal to stop. - closeCh chan string - - // Flag to indicate whether this connection is closed. - closed bool -} - -// bytesKeyIterator is an iterator returned by ListKeys call. -type bytesKeyIterator struct { - index int - keys []string - db *BytesConnectionRedis - pattern string - cursor uint64 - trimPrefix func(key string) string - err error -} - -// bytesKeyValIterator is an iterator returned by ListValues call. -type bytesKeyValIterator struct { - values [][]byte - bytesKeyIterator -} - -// bytesKeyVal represents a single key-value pair. -type bytesKeyVal struct { - key string - value []byte - prevValue []byte -} - -// NewBytesConnection creates a new instance of BytesConnectionRedis using the provided -// Client (be it node, or cluster, or sentinel client). -func NewBytesConnection(client Client, log logging.Logger) (*BytesConnectionRedis, error) { - return &BytesConnectionRedis{log, client, make(chan string), false}, nil -} - -// Close closes the connection to redis. -func (db *BytesConnectionRedis) Close() error { - if db.closed { - db.Debug("Close() called on a closed connection") - return nil - } - db.Debug("Close()") - db.closed = true - safeclose.Close(db.closeCh) - if db.client != nil { - err := safeclose.Close(db.client) - if err != nil { - return fmt.Errorf("Close() encountered error: %s", err) - } - } - return nil -} - -// NewTxn creates new transaction. -func (db *BytesConnectionRedis) NewTxn() keyval.BytesTxn { - if db.closed { - db.Error("NewTxn() called on a closed connection") - return nil - } - db.Debug("NewTxn()") - - return &Txn{db: db, ops: []op{}, addPrefix: nil} -} - -// Put sets the key/value in Redis data store. Replaces value if the key already exists. -func (db *BytesConnectionRedis) Put(key string, data []byte, opts ...datasync.PutOption) error { - if db.closed { - return fmt.Errorf("Put(%s) called on a closed connection", key) - } - db.Debugf("Put(%s)", key) - - var ttl time.Duration - for _, o := range opts { - if withTTL, ok := o.(*datasync.WithTTLOpt); ok && withTTL.TTL > 0 { - ttl = withTTL.TTL - } - } - err := db.client.Set(key, data, ttl).Err() - if err != nil { - return fmt.Errorf("Set(%s) failed: %s", key, err) - } - return nil -} - -// GetValue retrieves the value of the key from Redis. 
-func (db *BytesConnectionRedis) GetValue(key string) (data []byte, found bool, revision int64, err error) { - if db.closed { - return nil, false, 0, fmt.Errorf("GetValue(%s) called on a closed connection", key) - } - db.Debugf("GetValue(%s)", key) - - statusCmd := db.client.Get(key) - data, err = statusCmd.Bytes() - if err != nil { - if err == GoRedisNil { - return data, false, 0, nil - } - return nil, false, 0, fmt.Errorf("Get(%s) failed: %s", key, err) - } - return data, true, 0, nil -} - -// ListKeys returns an iterator used to traverse keys that start with the given match string. -// When done traversing, you must close the iterator by calling its Close() method. -func (db *BytesConnectionRedis) ListKeys(match string) (keyval.BytesKeyIterator, error) { - if db.closed { - return nil, fmt.Errorf("ListKeys(%s) called on a closed connection", match) - } - return listKeys(db, match, nil, nil) -} - -// ListValues returns an iterator used to traverse key value pairs for all the keys that start with the given match string. -// When done traversing, you must close the iterator by calling its Close() method. -func (db *BytesConnectionRedis) ListValues(match string) (keyval.BytesKeyValIterator, error) { - if db.closed { - return nil, fmt.Errorf("ListValues(%s) called on a closed connection", match) - } - return listValues(db, match, nil, nil) -} - -// Delete deletes all the keys that start with the given match string. -func (db *BytesConnectionRedis) Delete(key string, opts ...datasync.DelOption) (found bool, err error) { - if db.closed { - return false, fmt.Errorf("Delete(%s) called on a closed connection", key) - } - db.Debugf("Delete(%s)", key) - - keysToDelete := []string{} - - var keyIsPrefix bool - for _, o := range opts { - if _, ok := o.(*datasync.WithPrefixOpt); ok { - keyIsPrefix = true - } - } - if keyIsPrefix { - iterator, err := db.ListKeys(key) - if err != nil { - return false, err - } - for { - k, _, last := iterator.GetNext() - if last { - break - } - keysToDelete = append(keysToDelete, k) - } - if len(keysToDelete) == 0 { - return false, nil - } - db.Debugf("Delete(%s): deleting %v", key, keysToDelete) - } else { - keysToDelete = append(keysToDelete, key) - } - - intCmd := db.client.Del(keysToDelete...) - if intCmd.Err() != nil { - return false, fmt.Errorf("Delete(%s) failed: %s", key, intCmd.Err()) - } - return (intCmd.Val() != 0), nil -} - -// Close closes the iterator. It returns either an error (if any occurs), or nil. -func (it *bytesKeyIterator) Close() error { - return it.err -} - -// GetNext returns the next item from the iterator. -// If the iterator encounters an error or has reached the last item previously, lastReceived is set to true. -func (it *bytesKeyIterator) GetNext() (key string, rev int64, lastReceived bool) { - if it.err != nil { - return "", 0, true - } - if it.index >= len(it.keys) { - if it.cursor == 0 { - return "", 0, true - } - var err error - it.keys, it.cursor, err = scanKeys(it.db, it.pattern, it.cursor) - if err != nil { - it.err = err - it.db.Errorf("GetNext() failed: %s (pattern %s)", err.Error(), it.pattern) - return "", 0, true - } - if len(it.keys) == 0 { - return "", 0, it.cursor == 0 - } - it.index = 0 - } - - key = it.keys[it.index] - if it.trimPrefix != nil { - key = it.trimPrefix(key) - } - it.index++ - - return key, 0, false -} - -// Close closes the iterator. It returns either an error (if it occurs), or nil. -func (it *bytesKeyValIterator) Close() error { - return it.err -} - -// GetNext returns the next item from the iterator. 
-// If the iterator encounters an error or has reached the last item previously, lastReceived is set to true. -func (it *bytesKeyValIterator) GetNext() (kv keyval.BytesKeyVal, lastReceived bool) { - if it.err != nil { - return nil, true - } - if it.index >= len(it.values) { - if it.cursor == 0 { - return nil, true - } - var err error - it.keys, it.cursor, err = scanKeys(it.db, it.pattern, it.cursor) - if err != nil { - it.err = err - it.db.Errorf("GetNext() failed: %s (pattern %s)", err.Error(), it.pattern) - return nil, true - } - if len(it.keys) == 0 { - return nil, it.cursor == 0 - } - it.values, err = getValues(it.db, it.keys) - if err != nil { - it.err = err - it.db.Errorf("GetNext() failed: %s (pattern %s)", err.Error(), it.pattern) - return nil, true - } - it.index = 0 - } - - key := it.keys[it.index] - if it.trimPrefix != nil { - key = it.trimPrefix(key) - } - - value := it.values[it.index] - var prevValue []byte - if it.index > 0 { - prevValue = it.values[it.index-1] - } - - kv = &bytesKeyVal{key, value, prevValue} - it.index++ - - return kv, false -} - -// GetValue returns the value of the pair. -func (kv *bytesKeyVal) GetValue() []byte { - return kv.value -} - -// GetPrevValue returns the previous value of the pair. -func (kv *bytesKeyVal) GetPrevValue() []byte { - return kv.prevValue -} - -// GetKey returns the key of the pair. -func (kv *bytesKeyVal) GetKey() string { - return kv.key -} - -// GetRevision returns the revision associated with the pair. -func (kv *bytesKeyVal) GetRevision() int64 { - return 0 -} - -func listKeys(db *BytesConnectionRedis, match string, - addPrefix func(key string) string, trimPrefix func(key string) string) (keyval.BytesKeyIterator, error) { - pattern := match - if addPrefix != nil { - pattern = addPrefix(pattern) - } - pattern = wildcard(pattern) - db.Debugf("listKeys(%s): pattern %s", match, pattern) - - keys, cursor, err := scanKeys(db, pattern, 0) - if err != nil { - return nil, err - } - return &bytesKeyIterator{ - index: 0, - keys: keys, - db: db, - pattern: pattern, - cursor: cursor, - trimPrefix: trimPrefix}, nil -} - -func listValues(db *BytesConnectionRedis, match string, - addPrefix func(key string) string, trimPrefix func(key string) string) (keyval.BytesKeyValIterator, error) { - keyIterator, err := listKeys(db, match, addPrefix, trimPrefix) - if err != nil { - return nil, err - } - bkIterator := keyIterator.(*bytesKeyIterator) - values, err := getValues(db, bkIterator.keys) - if err != nil { - return nil, err - } - return &bytesKeyValIterator{ - values: values, - bytesKeyIterator: *bkIterator}, nil -} - -func scanKeys(db *BytesConnectionRedis, pattern string, cursor uint64) (keys []string, next uint64, err error) { - for { - // count == 0 defaults to Redis default. See https://redis.io/commands/scan. - keys, next, err = db.client.Scan(cursor, pattern, 0).Result() - if err != nil { - db.Errorf("Scan(%s) failed: %s", pattern, err) - return keys, next, err - } - if keys == nil { - keys = []string{} - } - count := len(keys) - if count > 0 || next == 0 { - db.Debugf("scanKeys(%s): got %d keys @ cursor %d (next cursor %d)", pattern, count, cursor, next) - return keys, next, nil - } - cursor = next - } -} - -func getValues(db *BytesConnectionRedis, keys []string) (values [][]byte, err error) { - db.Debugf("getValues(%v)", keys) - - if len(keys) == 0 { - return [][]byte{}, nil - } - - sliceCmd := db.client.MGet(keys...) 
- if sliceCmd.Err() != nil { - return nil, fmt.Errorf("MGet(%v) failed: %s", keys, sliceCmd.Err()) - } - vals := sliceCmd.Val() - values = make([][]byte, len(vals)) - for i, v := range vals { - switch o := v.(type) { - case string: - values[i] = []byte(o) - case []byte: - values[i] = o - case nil: - values[i] = nil - } - } - return values, nil -} - -// ListValuesRange returns an iterator used to traverse values stored under the provided key. -// TODO: Not in BytesBroker interface -/* -func (db *BytesConnectionRedis) ListValuesRange(fromPrefix string, toPrefix string) (keyval.BytesKeyValIterator, error) { - db.Panic("Not implemented") - return nil, nil -} -*/ - -/////////////////////////////////////////////////////////////////////////////////////////////////// - -// BytesBrokerWatcherRedis uses BytesConnectionRedis to access the datastore. -// The connection can be shared among multiple BytesBrokerWatcherRedis. -// BytesBrokerWatcherRedis allows to define a keyPrefix that is prepended to -// all keys in its methods in order to shorten keys used in arguments. -type BytesBrokerWatcherRedis struct { - logging.Logger - prefix string - delegate *BytesConnectionRedis - - // closeCh is a channel closed when Close method of data broker is closed. - // It is used for giving go routines a signal to stop. - closeCh chan string -} - -// NewBrokerWatcher creates a new CRUD + KeyValProtoWatcher proxy instance to redis using BytesConnectionRedis. -// The given prefix will be prepended to key argument in all calls. -// Specify empty string ("") if not wanting to use prefix. -func (db *BytesConnectionRedis) NewBrokerWatcher(prefix string) *BytesBrokerWatcherRedis { - return &BytesBrokerWatcherRedis{db.Logger, prefix, db, db.closeCh} -} - -// NewBroker creates a new CRUD proxy instance to redis using BytesConnectionRedis. -// The given prefix will be prepended to key argument in all calls. -// Specify empty string ("") if not wanting to use prefix. -func (db *BytesConnectionRedis) NewBroker(prefix string) keyval.BytesBroker { - return db.NewBrokerWatcher(prefix) -} - -// NewWatcher creates a new KeyValProtoWatcher proxy instance to redis using BytesConnectionRedis. -// The given prefix will be prepended to key argument in all calls. -// Specify empty string ("") if not wanting to use prefix. -func (db *BytesConnectionRedis) NewWatcher(prefix string) keyval.BytesWatcher { - return db.NewBrokerWatcher(prefix) -} - -func (pdb *BytesBrokerWatcherRedis) addPrefix(key string) string { - return pdb.prefix + key -} - -func (pdb *BytesBrokerWatcherRedis) trimPrefix(key string) string { - return strings.TrimPrefix(key, pdb.prefix) -} - -// GetPrefix returns the prefix associated with this BytesBrokerWatcherRedis. -func (pdb *BytesBrokerWatcherRedis) GetPrefix() string { - return pdb.prefix -} - -// NewTxn creates new transaction. Prefix will be prepended to the key argument. -func (pdb *BytesBrokerWatcherRedis) NewTxn() keyval.BytesTxn { - if pdb.delegate.closed { - pdb.Error("NewTxn() called on a closed connection") - return nil - } - pdb.Debug("NewTxn()") - - return &Txn{db: pdb.delegate, ops: []op{}, addPrefix: pdb.addPrefix} -} - -// Put calls Put function of BytesConnectionRedis. Prefix will be prepended to the key argument. -func (pdb *BytesBrokerWatcherRedis) Put(key string, data []byte, opts ...datasync.PutOption) error { - if pdb.delegate.closed { - return fmt.Errorf("Put(%s) called on a closed connection", key) - } - pdb.Debugf("Put(%s)", key) - - return pdb.delegate.Put(pdb.addPrefix(key), data, opts...) 
-} - -// GetValue calls GetValue function of BytesConnectionRedis. -// Prefix will be prepended to the key argument when searching. -func (pdb *BytesBrokerWatcherRedis) GetValue(key string) (data []byte, found bool, revision int64, err error) { - if pdb.delegate.closed { - return nil, false, 0, fmt.Errorf("GetValue(%s) called on a closed connection", key) - } - pdb.Debugf("GetValue(%s)", key) - - return pdb.delegate.GetValue(pdb.addPrefix(key)) -} - -// ListKeys calls ListKeys function of BytesConnectionRedis. -// Prefix will be prepended to key argument when searching. -// The returned keys, however, will have the prefix trimmed. -// When done traversing, you must close the iterator by calling its Close() method. -func (pdb *BytesBrokerWatcherRedis) ListKeys(match string) (keyval.BytesKeyIterator, error) { - if pdb.delegate.closed { - return nil, fmt.Errorf("ListKeys(%s) called on a closed connection", match) - } - return listKeys(pdb.delegate, match, pdb.addPrefix, pdb.trimPrefix) -} - -// ListValues calls ListValues function of BytesConnectionRedis. -// Prefix will be prepended to key argument when searching. -// The returned keys, however, will have the prefix trimmed. -// When done traversing, you must close the iterator by calling its Close() method. -func (pdb *BytesBrokerWatcherRedis) ListValues(match string) (keyval.BytesKeyValIterator, error) { - if pdb.delegate.closed { - return nil, fmt.Errorf("ListValues(%s) called on a closed connection", match) - } - return listValues(pdb.delegate, match, pdb.addPrefix, pdb.trimPrefix) -} - -// Delete calls Delete function of BytesConnectionRedis. -// Prefix will be prepended to key argument when searching. -func (pdb *BytesBrokerWatcherRedis) Delete(match string, opts ...datasync.DelOption) (found bool, err error) { - if pdb.delegate.closed { - return false, fmt.Errorf("Delete(%s) called on a closed connection", match) - } - pdb.Debugf("Delete(%s)", match) - - return pdb.delegate.Delete(pdb.addPrefix(match), opts...) -} - -// ListValuesRange calls ListValuesRange function of BytesConnectionRedis. -// Prefix will be prepended to key argument when searching. -// TODO: Not in BytesBroker interface -/* -func (pdb *BytesBrokerWatcherRedis) ListValuesRange(fromPrefix string, toPrefix string) (keyval.BytesKeyValIterator, error) { - return pdb.delegate.ListValuesRange(pdb.addPrefix(fromPrefix), pdb.addPrefix(toPrefix)) -} -*/ - -const redisWildcardChars = "*?[]" - -func wildcard(match string) string { - containsWildcard := strings.ContainsAny(match, redisWildcardChars) - if !containsWildcard { - return match + "*" //prefix - } - return match -} diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_txn_impl.go b/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_txn_impl.go deleted file mode 100755 index 51668847c4..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_txn_impl.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package redis - -import ( - "fmt" - "strings" - - goredis "github.com/go-redis/redis" - "github.com/howeyc/crc16" - "github.com/ligato/cn-infra/db/keyval" -) - -type op struct { - key string - value []byte - del bool -} - -// Txn allows to group operations into the transaction. Transaction executes multiple operations -// in a more efficient way in contrast to executing them one by one. -type Txn struct { - db *BytesConnectionRedis - ops []op - addPrefix func(key string) string -} - -// Put adds a new 'put' operation to a previously created transaction. -// If the key does not exist in the data store, a new key-value item -// will be added to the data store. If the key exists in the data store, -// the existing value will be overwritten with the value from this -// operation. -func (tx *Txn) Put(key string, value []byte) keyval.BytesTxn { - if tx.addPrefix != nil { - key = tx.addPrefix(key) - } - tx.ops = append(tx.ops, op{key, value, false}) - return tx -} - -// Delete adds a new 'delete' operation to a previously created -// transaction. -func (tx *Txn) Delete(key string) keyval.BytesTxn { - if tx.addPrefix != nil { - key = tx.addPrefix(key) - } - tx.ops = append(tx.ops, op{key, nil, true}) - return tx -} - -// Commit commits all operations in a transaction to the data store. -// Commit is atomic - either all operations in the transaction are -// committed to the data store, or none of them. -func (tx *Txn) Commit() (err error) { - if tx.db.closed { - return fmt.Errorf("Commit() called on a closed connection") - } - tx.db.Debug("Commit()") - - if len(tx.ops) == 0 { - return nil - } - - // go-redis - - pipeline := tx.db.client.TxPipeline() - for _, op := range tx.ops { - if op.del { - pipeline.Del(op.key) - } else { - pipeline.Set(op.key, op.value, 0) - } - } - _, err = pipeline.Exec() - if err != nil { - // Redis cluster won't let you run multi-key commands in case of cross slot. - // - Cross slot check may be useful indicator in case of failure. - if _, yes := tx.db.client.(*goredis.ClusterClient); yes { - checkCrossSlot(tx) - } - return fmt.Errorf("%T.Exec() failed: %s", pipeline, err) - } - return nil -} - -// CROSSSLOT Keys in request don't hash to the same slot -// https://stackoverflow.com/questions/38042629/redis-cross-slot-error -// https://redis.io/topics/cluster-spec#keys-hash-tags -// https://redis.io/topics/cluster-tutorial -// "Redis Cluster supports multiple key operations as long as all the keys involved into a single -// command execution (or whole transaction, or Lua script execution) all belong to the same hash -// slot. The user can force multiple keys to be part of the same hash slot by using a concept -// called hash tags." 
-func checkCrossSlot(tx *Txn) bool { - var hashSlot uint16 - var key string - - for _, op := range tx.ops { - if hashSlot == 0 { - hashSlot = getHashSlot(op.key) - key = op.key - } else { - slot := getHashSlot(op.key) - if slot != hashSlot { - tx.db.Warnf("%T: Found CROSS SLOT keys (%s, %d) and (%s, %d)", - *tx, key, hashSlot, op.key, slot) - return true - } - } - } - return false -} - -func getHashSlot(key string) uint16 { - var tag string - start := strings.Index(key, "{") - if start != -1 { - start++ - tagSlice := key[start:] - end := strings.Index(tagSlice, "}") - if end != -1 { - tag = tagSlice[:end] - } - } - const redisHashSlotCount = 16384 - return crc16.ChecksumCCITT([]byte(tag)) % redisHashSlotCount -} diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_watcher_impl.go b/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_watcher_impl.go deleted file mode 100755 index 5144dea58b..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_watcher_impl.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package redis - -import ( - "fmt" - "strings" - - goredis "github.com/go-redis/redis" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/utils/safeclose" -) - -const keySpaceEventPrefix = "__keyspace@*__:" - -// BytesWatchPutResp is sent when new key-value pair has been inserted or the value is updated. -type BytesWatchPutResp struct { - key string - value []byte - prevValue []byte - rev int64 // TODO Does Redis data have revision? -} - -// NewBytesWatchPutResp creates an instance of BytesWatchPutResp. -func NewBytesWatchPutResp(key string, value []byte, prevValue []byte, revision int64) *BytesWatchPutResp { - return &BytesWatchPutResp{key: key, value: value, prevValue: prevValue, rev: revision} -} - -// GetChangeType returns "Put" for BytesWatchPutResp. -func (resp *BytesWatchPutResp) GetChangeType() datasync.PutDel { - return datasync.Put -} - -// GetKey returns the key that has been inserted. -func (resp *BytesWatchPutResp) GetKey() string { - return resp.key -} - -// GetValue returns the value that has been inserted. -func (resp *BytesWatchPutResp) GetValue() []byte { - return resp.value -} - -// GetPrevValue returns the value that has been inserted. -func (resp *BytesWatchPutResp) GetPrevValue() []byte { - return resp.prevValue -} - -// GetRevision returns the revision associated with create action. -func (resp *BytesWatchPutResp) GetRevision() int64 { - return resp.rev -} - -// BytesWatchDelResp is sent when a key-value pair has been removed. -type BytesWatchDelResp struct { - key string - rev int64 // TODO Does Redis data have revision? -} - -// NewBytesWatchDelResp creates an instance of BytesWatchDelResp. 
-func NewBytesWatchDelResp(key string, revision int64) *BytesWatchDelResp { - return &BytesWatchDelResp{key: key, rev: revision} -} - -// GetChangeType returns "Delete" for BytesWatchPutResp. -func (resp *BytesWatchDelResp) GetChangeType() datasync.PutDel { - return datasync.Delete -} - -// GetKey returns the key that has been deleted. -func (resp *BytesWatchDelResp) GetKey() string { - return resp.key -} - -// GetValue returns nil for BytesWatchDelResp. -func (resp *BytesWatchDelResp) GetValue() []byte { - return nil -} - -// GetPrevValue returns nil for BytesWatchDelResp -func (resp *BytesWatchDelResp) GetPrevValue() []byte { - return nil -} - -// GetRevision returns the revision associated with the delete operation. -func (resp *BytesWatchDelResp) GetRevision() int64 { - return resp.rev -} - -// Watch starts subscription for changes associated with the selected key. Watch events will be delivered to respChan. -// Subscription can be canceled by StopWatch call. -func (db *BytesConnectionRedis) Watch(resp func(keyval.BytesWatchResp), closeChan chan string, keys ...string) error { - if db.closed { - return fmt.Errorf("watch(%v) called on a closed connection", keys) - } - db.closeCh = closeChan - - return watch(db, resp, db.closeCh, nil, nil, keys...) -} - -func watch(db *BytesConnectionRedis, resp func(keyval.BytesWatchResp), closeChan <-chan string, - addPrefix func(key string) string, trimPrefix func(key string) string, keys ...string) error { - patterns := make([]string, len(keys)) - for i, k := range keys { - if addPrefix != nil { - k = addPrefix(k) - } - patterns[i] = keySpaceEventPrefix + wildcard(k) - } - pubSub := db.client.PSubscribe(patterns...) - startWatch(db, pubSub, resp, trimPrefix, patterns...) - go func() { - _, active := <-closeChan - if !active { - db.Debugf("Received signal to close Watch(%v)", patterns) - if !db.closed { - err := pubSub.PUnsubscribe(patterns...) - if err != nil { - db.Errorf("PUnsubscribe %v failed: %s", patterns, err) - } - safeclose.Close(pubSub) - } - } - }() - return nil -} - -func startWatch(db *BytesConnectionRedis, pubSub *goredis.PubSub, - resp func(keyval.BytesWatchResp), trimPrefix func(key string) string, patterns ...string) { - go func() { - defer func() { db.Debugf("Watch(%v) exited", patterns) }() - db.Debugf("start Watch(%v)", patterns) - // to store previous value - var prevVal []byte - for { - msg, err := pubSub.ReceiveMessage() - if db.closed { - return - } - if err != nil { - db.Errorf("Watch(%v) encountered error: %s", patterns, err) - continue - } - if msg == nil { - // channel closed? - db.Debugf("%T.ReceiveMessage() returned nil", pubSub) - continue - } - db.Debugf("Receive %T: %s %s %s", msg, msg.Pattern, msg.Channel, msg.Payload) - key := msg.Channel[strings.Index(msg.Channel, ":")+1:] - db.Debugf("key = %s", key) - switch msg.Payload { - case "set": - // keyspace event does not carry value. Need to retrieve it. - val, _, rev, err := db.GetValue(key) - if err != nil { - db.Errorf("GetValue(%s) failed with error %s", key, err) - } - if val == nil { - db.Debugf("GetValue(%s) returned nil", key) - } - if trimPrefix != nil { - key = trimPrefix(key) - } - resp(NewBytesWatchPutResp(key, val, prevVal, rev)) - prevVal = val - case "del", "expired": - if trimPrefix != nil { - key = trimPrefix(key) - } - resp(NewBytesWatchDelResp(key, 0)) - default: - db.Debugf("%T: %s %s %s -- not handled", msg, msg.Pattern, msg.Channel, msg.Payload) - } - } - }() -} - -// Watch starts subscription for changes associated with the selected key. 
Watch events will be delivered to respChan. -func (pdb *BytesBrokerWatcherRedis) Watch(resp func(keyval.BytesWatchResp), closeChan chan string, keys ...string) error { - if pdb.delegate.closed { - return fmt.Errorf("watch(%v) called on a closed connection", keys) - } - return watch(pdb.delegate, resp, closeChan, pdb.addPrefix, pdb.trimPrefix, keys...) -} diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/redis/config.go b/vendor/github.com/ligato/cn-infra/db/keyval/redis/config.go deleted file mode 100755 index c694fea5bd..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/keyval/redis/config.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package redis - -import ( - "io/ioutil" - "time" - - "crypto/tls" - "crypto/x509" - "fmt" - - "github.com/coreos/etcd/pkg/tlsutil" - "github.com/ghodss/yaml" - goredis "github.com/go-redis/redis" -) - -// TLS configures Transport layer security properties. -type TLS struct { - Enabled bool `json:"enabled"` // enable/disable TLS - SkipVerify bool `json:"skip-verify"` // whether to skip verification of server name & certificate - Certfile string `json:"cert-file"` // client certificate - Keyfile string `json:"key-file"` // client private key - CAfile string `json:"ca-file"` // certificate authority -} - -func createTLSConfig(config TLS) (*tls.Config, error) { - var ( - cert *tls.Certificate - cp *x509.CertPool - err error - ) - if config.Certfile != "" && config.Keyfile != "" { - cert, err = tlsutil.NewCert(config.Certfile, config.Keyfile, nil) - if err != nil { - return nil, fmt.Errorf("tlsutil.NewCert() failed: %s", err) - } - } - - if config.CAfile != "" { - cp, err = tlsutil.NewCertPool([]string{config.CAfile}) - if err != nil { - return nil, fmt.Errorf("tlsutil.NewCertPool() failed: %s", err) - } - } - - tlsConfig := &tls.Config{ - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: config.SkipVerify, - RootCAs: cp, - } - if cert != nil { - tlsConfig.Certificates = []tls.Certificate{*cert} - } - - return tlsConfig, nil -} - -/////////////////////////////////////////////////////////////////////////////// -// go-redis https://github.com/go-redis/redis - -// GoRedisNil is error returned by go-redis when Redis replies with nil, -// .e.g. when key does not exist. -const GoRedisNil = goredis.Nil - -// Client is common interface used to adapt all types of Redis clients. -type Client interface { - // The easiest way to adapt Cmdable interface is just to embed it. - goredis.Cmdable - /* - But that means we'll have to mock each and every method in Cmdable for - unit tests, making it a whole lot more complicated. When the time comes, - it may be more manageable to only declare (duplicate) the methods - we need from Cmdable. 
As follows: - Del(keys ...string) *goredis.IntCmd - Get(key string) *goredis.StringCmd - MGet(keys ...string) *goredis.SliceCmd - MSet(pairs ...interface{}) *goredis.StatusCmd - Scan(cursor uint64, match string, count int64) *goredis.ScanCmd - Set(key string, value interface{}, expiration time.Duration) *goredis.StatusCmd - */ - - // Declare these additional methods to enable access to them through this - // interface. - Close() error - PSubscribe(channels ...string) *goredis.PubSub -} - -// ClientConfig is a configuration common to all types of Redis clients. -type ClientConfig struct { - // Password for authentication, if required. - Password string `json:"password"` - - // Dial timeout for establishing new connections. Default is 5 seconds. - DialTimeout time.Duration `json:"dial-timeout"` - - // Timeout for socket reads. If reached, commands will fail with a timeout - // instead of blocking. Default is 3 seconds. - ReadTimeout time.Duration `json:"read-timeout"` - - // Timeout for socket writes. If reached, commands will fail with a timeout - // instead of blocking. Default is ReadTimeout. - WriteTimeout time.Duration `json:"write-timeout"` - - // Connection pool configuration. - Pool PoolConfig `json:"pool"` -} - -// NodeConfig Node client configuration -type NodeConfig struct { - // host:port address of a Redis node - Endpoint string `json:"endpoint"` - - // Database to be selected after connecting to the server. - DB int `json:"db"` - - // Enables read-only queries on slave nodes. - EnableReadQueryOnSlave bool `json:"enable-query-on-slave"` - - // TLS configuration -- only applies to node client. - TLS TLS `json:"tls"` - - // Embedded common client configuration. - ClientConfig -} - -// ClusterConfig Cluster client configuration -type ClusterConfig struct { - // A seed list of host:port addresses of cluster nodes. - Endpoints []string `json:"endpoints"` - - // Enables read-only queries on slave nodes. - EnableReadQueryOnSlave bool `json:"enable-query-on-slave"` - - // The maximum number of redirects before giving up. - // Command is retried on network errors and MOVED/ASK redirects. Default is 16. - MaxRedirects int `json:"max-rediects"` - // Allows routing read-only commands to the closest master or slave node. - RouteByLatency bool `json:"route-by-latency"` - - ClientConfig -} - -// SentinelConfig Sentinel client configuration -type SentinelConfig struct { - // A seed list of host:port addresses sentinel nodes. - Endpoints []string `json:"endpoints"` - - // The sentinel master name. - MasterName string `json:"master-name"` - - // Database to be selected after connecting to the server. - DB int `json:"db"` - - ClientConfig -} - -// PoolConfig is a configuration of the go-redis connection pool. -type PoolConfig struct { - // Maximum number of socket connections. - // Default is 10 connections per every CPU as reported by runtime.NumCPU. - PoolSize int `json:"max-connections"` - // Amount of time, in seconds, a client waits for connection if all connections - // are busy before returning an error. - // Default is ReadTimeout + 1 second. - PoolTimeout time.Duration `json:"busy-timeout"` - // Amount of time, in seconds, after which a client closes idle connections. - // Should be less than server's timeout. - // Default is 5 minutes. - IdleTimeout time.Duration `json:"idle-timeout"` - // Frequency of idle checks. - // Default is 1 minute. - // When negative value is set, then idle check is disabled. 
- IdleCheckFrequency time.Duration `json:"idle-check-frequency"` -} - -// ConfigToClient creates an appropriate client according to the configuration -// parameter. -func ConfigToClient(config interface{}) (Client, error) { - switch cfg := config.(type) { - case NodeConfig: - return CreateNodeClient(cfg) - case ClusterConfig: - return CreateClusterClient(cfg) - case SentinelConfig: - return CreateSentinelClient(cfg) - case nil: - return nil, fmt.Errorf("Configuration cannot be nil") - } - return nil, fmt.Errorf("Unknown configuration type %T", config) -} - -// CreateNodeClient creates a client that will connect to a redis node, -// like master and/or slave. -func CreateNodeClient(config NodeConfig) (Client, error) { - var tlsConfig *tls.Config - if config.TLS.Enabled { - var err error - tlsConfig, err = createTLSConfig(config.TLS) - if err != nil { - return nil, err - } - } - return goredis.NewClient(&goredis.Options{ - Network: "tcp", - Addr: config.Endpoint, - - // Database to be selected after connecting to the server - DB: config.DB, - - // Enables read only queries on slave nodes. - /*ReadOnly: config.EnableReadQueryOnSlave,*/ - - // TLS Config to use. When set TLS will be negotiated. - TLSConfig: tlsConfig, - - // Optional password. Must match the password specified in the requirepass server configuration option. - Password: config.Password, - - // Dial timeout for establishing new connections. Default is 5 seconds. - DialTimeout: config.DialTimeout, - // Timeout for socket reads. If reached, commands will fail with a timeout instead of blocking. Default is 3 seconds. - ReadTimeout: config.ReadTimeout, - // Timeout for socket writes. If reached, commands will fail with a timeout instead of blocking. Default is ReadTimeout. - WriteTimeout: config.WriteTimeout, - - // Maximum number of socket connections. Default is 10 connections per every CPU as reported by runtime.NumCPU. - PoolSize: config.Pool.PoolSize, - // Amount of time a client waits for connection if all connections are busy before returning an error. Default is ReadTimeout + 1 second. - PoolTimeout: config.Pool.PoolTimeout, - // Amount of time after which a client closes idle connections. Should be less than server's timeout. Default is 5 minutes. - IdleTimeout: config.Pool.IdleTimeout, - // Frequency of idle checks. Default is 1 minute. When negative value is set, then idle check is disabled. - IdleCheckFrequency: config.Pool.IdleCheckFrequency, - - // Dialer creates new network connection and has priority over Network and Addr options. - // Dialer func() (net.Conn, error) - // Hook that is called when new connection is established - // OnConnect func(*Conn) error - - // Maximum number of retries before giving up. Default is to not retry failed commands. - MaxRetries: 0, - // Minimum backoff between each retry. Default is 8 milliseconds; -1 disables backoff. - MinRetryBackoff: 0, - // Maximum backoff between each retry. Default is 512 milliseconds; -1 disables backoff. - MaxRetryBackoff: 0, - }), nil -} - -// CreateClusterClient Creates a client that will connect to a redis cluster. -func CreateClusterClient(config ClusterConfig) (Client, error) { - return goredis.NewClusterClient(&goredis.ClusterOptions{ - Addrs: config.Endpoints, - - // Enables read only queries on slave nodes. - ReadOnly: config.EnableReadQueryOnSlave, - - MaxRedirects: config.MaxRedirects, - RouteByLatency: config.RouteByLatency, - - // Optional password. Must match the password specified in the requirepass server configuration option. 
-		Password: config.Password,
-
-		// Dial timeout for establishing new connections. Default is 5 seconds.
-		DialTimeout: config.DialTimeout,
-		// Timeout for socket reads. If reached, commands will fail with a timeout instead of blocking. Default is 3 seconds.
-		ReadTimeout: config.ReadTimeout,
-		// Timeout for socket writes. If reached, commands will fail with a timeout instead of blocking. Default is ReadTimeout.
-		WriteTimeout: config.WriteTimeout,
-
-		// Maximum number of socket connections. Default is 10 connections per every CPU as reported by runtime.NumCPU.
-		PoolSize: config.Pool.PoolSize,
-		// Amount of time a client waits for a connection if all connections are busy before returning an error. Default is ReadTimeout + 1 second.
-		PoolTimeout: config.Pool.PoolTimeout,
-		// Amount of time after which a client closes idle connections. Should be less than the server's timeout. Default is 5 minutes.
-		IdleTimeout: config.Pool.IdleTimeout,
-		// Frequency of idle checks. Default is 1 minute. When a negative value is set, the idle check is disabled.
-		IdleCheckFrequency: config.Pool.IdleCheckFrequency,
-
-		// Maximum number of retries before giving up. Default is to not retry failed commands.
-		MaxRetries: 0,
-		// Minimum backoff between each retry. Default is 8 milliseconds; -1 disables backoff.
-		MinRetryBackoff: 0,
-		// Maximum backoff between each retry. Default is 512 milliseconds; -1 disables backoff.
-		MaxRetryBackoff: 0,
-
-		// Hook that is called when a new connection is established
-		// OnConnect func(*Conn) error
-	}), nil
-}
-
-// CreateSentinelClient creates a failover client that will connect to redis sentinels.
-func CreateSentinelClient(config SentinelConfig) (Client, error) {
-	return goredis.NewFailoverClient(&goredis.FailoverOptions{
-		SentinelAddrs: config.Endpoints,
-
-		DB: config.DB,
-
-		MasterName: config.MasterName,
-
-		// Optional password. Must match the password specified in the requirepass server configuration option.
-		Password: config.Password,
-
-		// Dial timeout for establishing new connections. Default is 5 seconds.
-		DialTimeout: config.DialTimeout,
-		// Timeout for socket reads. If reached, commands will fail with a timeout instead of blocking. Default is 3 seconds.
-		ReadTimeout: config.ReadTimeout,
-		// Timeout for socket writes. If reached, commands will fail with a timeout instead of blocking. Default is ReadTimeout.
-		WriteTimeout: config.WriteTimeout,
-
-		// Maximum number of socket connections. Default is 10 connections per every CPU as reported by runtime.NumCPU.
-		PoolSize: config.Pool.PoolSize,
-		// Amount of time a client waits for a connection if all connections are busy before returning an error. Default is ReadTimeout + 1 second.
-		PoolTimeout: config.Pool.PoolTimeout,
-		// Amount of time after which a client closes idle connections. Should be less than the server's timeout. Default is 5 minutes.
-		IdleTimeout: config.Pool.IdleTimeout,
-		// Frequency of idle checks. Default is 1 minute. When a negative value is set, the idle check is disabled.
-		IdleCheckFrequency: config.Pool.IdleCheckFrequency,
-
-		// Maximum number of retries before giving up. Default is to not retry failed commands.
-		MaxRetries: 0,
-
-		// Hook that is called when a new connection is established
-		// OnConnect func(*Conn) error
-	}), nil
-}
-
-// LoadConfig loads the given configFile and returns the appropriate config instance.
-func LoadConfig(configFile string) (cfg interface{}, err error) {
-	b, err := ioutil.ReadFile(configFile)
-	if err != nil {
-		return nil, err
-	}
-
-	var s SentinelConfig
-	err = yaml.Unmarshal(b, &s)
-	if err != nil {
-		return nil, err
-	}
-	if s.MasterName != "" {
-		return s, nil
-	}
-
-	n := NodeConfig{}
-	err = yaml.Unmarshal(b, &n)
-	if err != nil {
-		return nil, err
-	}
-	if n.Endpoint != "" {
-		return n, nil
-	}
-
-	c := ClusterConfig{}
-	err = yaml.Unmarshal(b, &c)
-	if err != nil {
-		return nil, err
-	}
-	return c, nil
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/redis/doc.go b/vendor/github.com/ligato/cn-infra/db/keyval/redis/doc.go
deleted file mode 100644
index 49e9c86546..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/keyval/redis/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package redis is the implementation of the key-value Data Broker client
-// API for the Redis key-value data store. See cn-infra/db/keyval for the
-// definition of the key-value Data Broker client API.
-package redis
diff --git a/vendor/github.com/ligato/cn-infra/db/keyval/redis/plugin_impl_redis.go b/vendor/github.com/ligato/cn-infra/db/keyval/redis/plugin_impl_redis.go
deleted file mode 100644
index b161623df1..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/keyval/redis/plugin_impl_redis.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package redis
-
-import (
-	"github.com/ligato/cn-infra/db/keyval"
-	"github.com/ligato/cn-infra/db/keyval/kvproto"
-	"github.com/ligato/cn-infra/flavors/local"
-	"github.com/ligato/cn-infra/health/statuscheck"
-)
-
-const (
-	// healthCheckProbeKey is a key used to probe Redis state.
-	healthCheckProbeKey = "probe-redis-connection"
-)
-
-// Plugin implements the redis plugin.
-type Plugin struct {
-	Deps
-	// Plugin is disabled if there is no config file available
-	disabled bool
-	// Redis connection encapsulation
-	connection *BytesConnectionRedis
-	// Read/Write proto modelled data
-	protoWrapper *kvproto.ProtoWrapper
-}
-
-// Deps lists dependencies of the redis plugin.
-type Deps struct {
-	local.PluginInfraDeps // inject
-}
-
-// Init retrieves redis configuration and establishes a new connection
-// with the redis data store.
-// If the configuration file doesn't exist or cannot be read, the returned error
-// will be of os.PathError type. An untyped error is returned in case the file
-// doesn't contain a valid YAML configuration.
-func (plugin *Plugin) Init() (err error) {
-	redisCfg, err := plugin.getRedisConfig()
-	if err != nil || plugin.disabled {
-		return err
-	}
-	// Create client according to config
-	client, err := ConfigToClient(redisCfg)
-	if err != nil {
-		return err
-	}
-	// Uses the config file to establish a connection with the database
-	plugin.connection, err = NewBytesConnection(client, plugin.Log)
-	if err != nil {
-		return err
-	}
-	plugin.protoWrapper = kvproto.NewProtoWrapperWithSerializer(plugin.connection, &keyval.SerializerJSON{})
-
-	// Register for providing status reports (polling mode)
-	if plugin.StatusCheck != nil {
-		plugin.StatusCheck.Register(plugin.PluginName, func() (statuscheck.PluginState, error) {
-			_, _, err := plugin.NewBroker("/").GetValue(healthCheckProbeKey, nil)
-			if err == nil {
-				return statuscheck.OK, nil
-			}
-			return statuscheck.Error, err
-		})
-	} else {
-		plugin.Log.Warnf("Unable to start status check for redis")
-	}
-
-	return nil
-}
-
-// Close does nothing for the redis plugin.
-func (plugin *Plugin) Close() error {
-	return nil
-}
-
-// NewBroker creates a new instance of a prefixed broker that provides an API with arguments of type proto.Message.
-func (plugin *Plugin) NewBroker(keyPrefix string) keyval.ProtoBroker {
-	return plugin.protoWrapper.NewBroker(keyPrefix)
-}
-
-// NewWatcher creates a new instance of a prefixed watcher that provides an API with arguments of type proto.Message.
-func (plugin *Plugin) NewWatcher(keyPrefix string) keyval.ProtoWatcher {
-	return plugin.protoWrapper.NewWatcher(keyPrefix)
-}
-
-// Disabled returns *true* if the plugin is not in use due to missing
-// redis configuration.
-func (plugin *Plugin) Disabled() (disabled bool) {
-	return plugin.disabled
-}
-
-func (plugin *Plugin) getRedisConfig() (cfg interface{}, err error) {
-	found, _ := plugin.PluginConfig.GetValue(&struct{}{})
-	if !found {
-		plugin.Log.Info("Redis config not found, skip loading this plugin")
-		plugin.disabled = true
-		return
-	}
-	configFile := plugin.PluginConfig.GetConfigName()
-	if configFile != "" {
-		cfg, err = LoadConfig(configFile)
-		if err != nil {
-			return
-		}
-	}
-	return
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/README.md b/vendor/github.com/ligato/cn-infra/db/sql/README.md
deleted file mode 100644
index 7a59d33037..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# SQL-like datastore
-
-The `sql` package defines the API for accessing a data store using SQL.
-The `Broker` interface allows reading and manipulating data.
-The `Watcher` API provides functions for monitoring changes
-in a data store.
-
-## Features
-
-- The user of the API has full control over the SQL statements,
-  types & bindings passed to the `Broker`.
-- Expressions:
-  - Helper functions alleviate the need to write SQL strings.
-  - The user can choose to only write expressions using helper
-    functions
-  - The user can write portions of SQL statements by hand
-    (the `sql.Exp` helper function) and combine them with other
-    expressions
-- The user can optionally use reflection to simplify repetitive work
-  with Iterators & Go structures
-- The API will be reused for different databases.
-  A specific implementation will be provided for each database.
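As a minimal sketch of how the expression helpers in the README above compose against the `sql.Broker` API (the `User` entity and its tags are hypothetical, used only to illustrate the calls; the helper signatures match the `sql_expression.go` and `slice_utils.go` sources removed later in this patch):

```go
package example

import (
	"github.com/ligato/cn-infra/db/sql"
)

// User is a hypothetical entity; the cql/pk tags follow the conventions
// used by the Cassandra implementation of this API.
type User struct {
	ID        string `cql:"id" pk:"id"`
	FirstName string `cql:"first_name"`
	LastName  string `cql:"last_name"`
}

// listBonds builds a query from helper expressions instead of a hand-written
// SQL string, then drains the resulting iterator into a slice.
func listBonds(db sql.Broker) ([]User, error) {
	table := &User{}
	query := sql.FROM(table, sql.WHERE(sql.Field(&table.LastName, sql.EQ("Bond"))))

	users := []User{}
	// SliceIt reads everything from the iterator into users and closes it.
	if err := sql.SliceIt(&users, db.ListValues(query)); err != nil {
		return nil, err
	}
	return users, nil
}
```

The same expression tree can be passed to `db.Delete(query)`, narrowed to a single row with `sql.PK`, or combined with hand-written fragments via `sql.Exp` — the mix-and-match design the README calls out.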
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/README.md b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/README.md
deleted file mode 100644
index fc2e16b0b4..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Cassandra implementation of Broker interface
-
-The API was tested with Cassandra 3 and supports:
-- UDT (User Defined Types) / embedded structs and honors gocql.Marshaler/gocql.Unmarshaler
-- handling all primitive types (like int aliases, IP addresses);
-  - net.IP can be stored as ipnet
-  - net.IPNet can be stored with a MarshalCQL/UnmarshalCQL wrapper go structure
-- dumping all rows except for Table
-- querying by secondary indexes
-- mocking of gocql behavior (using the gockle library) in automated unit tests
-
-# Cassandra Timeouts
-
-The API allows the client to configure either a single-node or a multi-node cluster.
-The client can also configure the following timeouts:
-- DialTimeout
-  - Initial connection timeout, used during the initial dial to the server
-  - Default value is 600ms
-- OpTimeout
-  - Connection timeout, used while executing a query
-  - Default value is 600ms
-- RedialInterval
-  - If not zero, gocql attempts to reconnect known DOWN nodes every RedialInterval (gocql ReconnectInterval)
-  - Default value is 60s
-- Example
-```go
-    config := &cassandra.Config{
-        Endpoints:      []string{"127.0.0.1:9042"},
-        Port:           9042,
-        DialTimeout:    600 * time.Millisecond,
-        OpTimeout:      600 * time.Millisecond,
-        RedialInterval: 60 * time.Second,
-    }
-
-    clientConfig, err := cassandra.ConfigToClientConfig(config)
-```
-
-The timeout parameters are defined here [config.go](config.go)
-
-Supported by the underlying gocql structure [ClusterConfig](../../../vendor/github.com/gocql/gocql/cluster.go)
-
-# Cassandra Data Consistency
-
-The API allows the client to configure the consistency level for both
-- Session
-- Query (to be implemented)
-
-Supported by the underlying gocql structure [Session](../../../vendor/github.com/gocql/gocql/session.go)
-
-
-# Factors to be considered for achieving the desired consistency level
-- Replication strategy
-  - A replication strategy determines the nodes where replicas are placed.
-  - SimpleStrategy: Used for a single data center only.
-  - NetworkTopologyStrategy: Used for more than one data center.
-- Replication factor
-  - The total number of replicas across the cluster is referred to as the replication factor.
-  - A replication factor of 1 means that there is only one copy of each row on one node.
-  - A replication factor of 2 means that there are two copies of each row, and each copy is stored on a different node.
-  - The replication factor should not exceed the number of nodes in the cluster.
-
-- To achieve Quorum
-  - Quorum = (sum_of_replication_factors / 2) + 1
-  - (nodes_written + nodes_read) > replication_factor
-  - e.g. with a replication factor of 3: quorum = (3/2) + 1 = 2, so writing 2 replicas and reading 2 replicas gives (2 + 2) > 3 and satisfies the inequality
-
-- References
-  - [Apache Cassandra](http://cassandra.apache.org/doc/latest/operating/index.html)
-  - [Cassandra Data Replication](http://docs.datastax.com/en/archived/cassandra/2.0/cassandra/architecture/architectureDataDistributeReplication_c.html)
-  - [Cassandra Consistency Levels](http://docs.datastax.com/en/cassandra/latest/cassandra/dml/dmlConfigConsistency.html)
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_broker_impl.go b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_broker_impl.go
deleted file mode 100644
index 269c88508b..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_broker_impl.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cassandra
-
-import (
-	"github.com/ligato/cn-infra/db/sql"
-	"github.com/ligato/cn-infra/utils/structs"
-	"github.com/willfaught/gockle"
-)
-
-// NewBrokerUsingSession is a Broker constructor. Use it like this:
-//
-//    session := gockle.NewSession(gocql.NewCluster("172.17.0.1"))
-//    defer session.Close()
-//    db := NewBrokerUsingSession(session)
-//    db.ListValues(...)
-func NewBrokerUsingSession(gocqlSession gockle.Session) *BrokerCassa {
-	return &BrokerCassa{session: gocqlSession}
-}
-
-// BrokerCassa implements the sql.Broker interface. This implementation simplifies work with gocql
-// so that there is no need to write the "SQL" queries by hand. But the "SQL" is not really hidden, one can use it if needed.
-// The "SQL" queries are generated from the go structures (see more details in Put, Delete, GetValue, ListValues).
-type BrokerCassa struct {
-	session gockle.Session
-}
-
-// ValIterator is an iterator returned by the ListValues call
-type ValIterator struct {
-	Delegate gockle.Iterator
-}
-
-// ErrIterator is an iterator that stops immediately and just returns the last error on Close()
-type ErrIterator struct {
-	LastError error
-}
-
-// Put - see the description in interface sql.Broker.Put().
-// Put generates statement & binding for gocql Exec().
-// Any error returned from gockle.Session.Exec is propagated upwards.
-func (pdb *BrokerCassa) Put(where sql.Expression, pointerToAStruct interface{} /*TODO TTL, opts ...datasync.PutOption*/) error {
-	statement, bindings, err := PutExpToString(where, pointerToAStruct)
-
-	if err != nil {
-		return err
-	}
-	return pdb.session.Exec(statement, bindings...)
-}
-
-// Exec - see the description in interface sql.Broker.Exec()
-// Exec runs statement (AS-IS) using gocql
-func (pdb *BrokerCassa) Exec(statement string, binding ...interface{}) error {
-	return pdb.session.Exec(statement, binding...)
-}
-
-// Delete - see the description in interface sql.Broker.Delete()
-// Delete generates statement & binding for gocql Exec()
-func (pdb *BrokerCassa) Delete(fromWhere sql.Expression) error {
-	statement, bindings, err := ExpToString(fromWhere)
-	if err != nil {
-		return err
-	}
-	return pdb.session.Exec("DELETE"+statement, bindings...)
-}
-
-// GetValue - see the description in interface sql.Broker.GetValue()
-// GetValue just iterates once over ListValues()
-func (pdb *BrokerCassa) GetValue(query sql.Expression, reqObj interface{}) (found bool, err error) {
-	it := pdb.ListValues(query)
-	stop := it.GetNext(reqObj)
-	return !stop, it.Close()
-}
-
-// ListValues retrieves an iterator over the elements matching the provided query.
-// ListValues runs the query (AS-IS) using a gocql Scan Iterator.
-func (pdb *BrokerCassa) ListValues(query sql.Expression) sql.ValIterator {
-	queryStr, binding, err := SelectExpToString(query)
-	if err != nil {
-		return &ErrIterator{err}
-	}
-
-	it := pdb.session.ScanIterator(queryStr, binding...)
-	return &ValIterator{it}
-}
-
-// GetNext returns the following item from the result set. It returns stop=true
-// when there is no more data.
-// argument "outVal" can be:
-// - a pointer to a structure
-// - a map
func (it *ValIterator) GetNext(outVal interface{}) (stop bool) {
-	if m, ok := outVal.(map[string]interface{}); ok {
-		ok = it.Delegate.ScanMap(m)
-		return !ok // if not ok then stop
-	}
-
-	_, ptrs := structs.ListExportedFieldsPtrs(outVal, cqlExported)
-	ok := it.Delegate.Scan(ptrs...)
-	return !ok // if not ok then stop
-}
-
-// Close the iterator. Note, the error is important (it may occur during marshalling/un-marshalling)
-func (it *ValIterator) Close() error {
-	return it.Delegate.Close()
-}
-
-// GetNext of ErrIterator stops the iteration immediately; it always returns stop=true.
-// argument "outVal" can be:
-// - a pointer to a structure
-// - a map
-func (it *ErrIterator) GetNext(outVal interface{}) (stop bool) {
-	return true
-}
-
-// Close the iterator. Note, the error is important (it may occur during marshalling/un-marshalling)
-func (it *ErrIterator) Close() error {
-	return it.LastError
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_txn_impl.go b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_txn_impl.go
deleted file mode 100644
index 15a6abf424..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_txn_impl.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cassandra
-
-import (
-	"github.com/ligato/cn-infra/db/sql"
-)
-
-// NewTxn creates a new Data Broker transaction. A transaction can
-// hold multiple operations that are all committed to the data
-// store together. After a transaction has been created, one or
-// more operations (put or delete) can be added to the transaction
-// before it is committed.
-func (pdb *BrokerCassa) NewTxn() sql.Txn {
-	// TODO Cassandra Batch/TXN
-	panic("not implemented")
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_watcher_impl.go b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_watcher_impl.go
deleted file mode 100644
index 4959ce1aee..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_watcher_impl.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cassandra
-
-/*TODO use https://issues.apache.org/jira/browse/CASSANDRA-8844
-type WatcherCassa struct {
-	watcher    keyval.BytesWatcher
-	serializer keyval.Serializer
-}
-*/
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassandra.conf b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassandra.conf
deleted file mode 100644
index 20393a0769..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassandra.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-# A list of host:port addresses of cassandra cluster nodes.
-endpoints:
- - "172.17.0.1:9042"
-
-# Cassandra port
-port: 9042
-
-# Connection timeout. The default value is 600ms.
-op_timeout: 600
-
-# Initial session timeout, used during the initial dial to the server. The default value is 600ms.
-dial_timeout: 600
-
-# If set, gocql attempts to reconnect known DOWN nodes every redial_interval.
-redial_interval: 600
-
-# ProtoVersion sets the version of the native protocol to use, this will
-# enable features in the driver for specific protocol versions, generally this
-# should be set to a known version (2,3,4) for the cluster being connected to.
-# If it is 0 or unset (the default) then the driver will attempt to discover the
-# highest supported protocol for the cluster. In clusters with nodes of different
-# versions the protocol selected is not defined (i.e., it can be any of the versions supported in the cluster)
-protocol_version: 0
-
-# Transport Layer Security setup
-tls:
\ No newline at end of file
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/config.go b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/config.go
deleted file mode 100644
index 31050b01c4..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/config.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cassandra
-
-import (
-	"strings"
-	"time"
-
-	"strconv"
-
-	"github.com/gocql/gocql"
-)
-
-// TLS groups the TLS configuration options.
-type TLS struct {
-	Certfile               string `json:"cert_path"`                // client certificate
-	Keyfile                string `json:"key_path"`                 // client private key
-	CAfile                 string `json:"ca_path"`                  // certificate authority
-	EnableHostVerification bool   `json:"enable_host_verification"` // whether to verify the server name & certificate
-	Enabled                bool   `json:"enabled"`                  // enable/disable TLS
-}
-
-// Config is the configuration for Cassandra clients, loaded from a configuration file
-type Config struct {
-	// A list of host addresses of cluster nodes.
-	Endpoints []string `json:"endpoints"`
-
-	// Port for Cassandra (default: 9042)
-	Port int `json:"port"`
-
-	// session timeout (default: 600ms)
-	OpTimeout time.Duration `json:"op_timeout"`
-
-	// initial session timeout, used during the initial dial to the server (default: 600ms)
-	DialTimeout time.Duration `json:"dial_timeout"`
-
-	// If not zero, gocql attempts to reconnect known DOWN nodes every RedialInterval.
-	RedialInterval time.Duration `json:"redial_interval"`
-
-	// ProtoVersion sets the version of the native protocol to use, this will
-	// enable features in the driver for specific protocol versions, generally this
-	// should be set to a known version (2,3,4) for the cluster being connected to.
-	//
-	// If it is 0 or unset (the default) then the driver will attempt to discover the
-	// highest supported protocol for the cluster. In clusters with nodes of different
-	// versions the protocol selected is not defined (i.e., it can be any of the versions supported in the cluster)
-	ProtocolVersion int `json:"protocol_version"`
-
-	// TLS configuration
-	TLS TLS `json:"tls"`
-}
-
-// ClientConfig wraps the gocql ClusterConfig
-type ClientConfig struct {
-	*gocql.ClusterConfig
-}
-
-const defaultOpTimeout = 600 * time.Millisecond
-const defaultDialTimeout = 600 * time.Millisecond
-const defaultRedialInterval = 60 * time.Second
-const defaultProtocolVersion = 4
-
-// ConfigToClientConfig transforms the yaml configuration into ClientConfig.
-// If the configuration of endpoints is invalid, error ErrInvalidEndpointConfig
-// is returned.
-func ConfigToClientConfig(ymlConfig *Config) (*ClientConfig, error) {
-
-	timeout := defaultOpTimeout
-	if ymlConfig.OpTimeout > 0 {
-		timeout = ymlConfig.OpTimeout
-	}
-
-	connectTimeout := defaultDialTimeout
-	if ymlConfig.DialTimeout > 0 {
-		connectTimeout = ymlConfig.DialTimeout
-	}
-
-	reconnectInterval := defaultRedialInterval
-	if ymlConfig.RedialInterval > 0 {
-		reconnectInterval = ymlConfig.RedialInterval
-	}
-
-	protoVersion := defaultProtocolVersion
-	if ymlConfig.ProtocolVersion > 0 {
-		protoVersion = ymlConfig.ProtocolVersion
-	}
-
-	endpoints, port, err := getEndpointsAndPort(ymlConfig.Endpoints)
-	if err != nil {
-		return nil, err
-	}
-
-	var sslOpts *gocql.SslOptions
-	if ymlConfig.TLS.Enabled {
-		sslOpts = &gocql.SslOptions{
-			CaPath:                 ymlConfig.TLS.CAfile,
-			CertPath:               ymlConfig.TLS.Certfile,
-			KeyPath:                ymlConfig.TLS.Keyfile,
-			EnableHostVerification: ymlConfig.TLS.EnableHostVerification,
-		}
-	}
-
-	clientConfig := &gocql.ClusterConfig{
-		Hosts: endpoints,
-		Port:  port,
-		// the configured values are already of type time.Duration,
-		// no extra scaling is needed
-		Timeout:           timeout,
-		ConnectTimeout:    connectTimeout,
-		ReconnectInterval: reconnectInterval,
-		ProtoVersion:      protoVersion,
-		SslOpts:           sslOpts,
-	}
-
-	cfg := &ClientConfig{ClusterConfig: clientConfig}
-
-	return cfg, nil
-}
-
-// CreateSessionFromConfig creates and initializes the cluster based on the supplied config
-// and returns a new session object that can be used to interact with the database.
-// The function propagates errors returned from gocql.CreateSession().
-func CreateSessionFromConfig(config *ClientConfig) (*gocql.Session, error) {
-
-	gocqlClusterConfig := gocql.NewCluster(HostsAsString(config.Hosts))
-	gocqlClusterConfig.Port = config.Port
-	gocqlClusterConfig.ConnectTimeout = config.ConnectTimeout
-	gocqlClusterConfig.ReconnectInterval = config.ReconnectInterval
-	gocqlClusterConfig.Timeout = config.Timeout
-	gocqlClusterConfig.ProtoVersion = config.ProtoVersion
-	gocqlClusterConfig.SslOpts = config.SslOpts
-
-	session, err := gocqlClusterConfig.CreateSession()
-
-	if err != nil {
-		return nil, err
-	}
-
-	return session, nil
-}
-
-// HostsAsString converts an array of host addresses into a comma separated string
-func HostsAsString(hostArr []string) string {
-	return strings.Join(hostArr, ",")
-}
-
-// getEndpointsAndPort does string manipulation to extract the []endpoints and port, eg: "127.0.0.1:9042" or "127.0.0.1:9042,127.0.0.2:9042"
-func getEndpointsAndPort(endpoints []string) (endpointsR []string, portR int, err error) {
-	var resultEndpoints []string
-	var resultPort int
-
-	if len(endpoints) > 1 {
-		return nil, 0, ErrInvalidEndpointConfig
-	}
-
-	if len(endpoints[0]) > 0 {
-		v := endpoints[0]
-
-		if !strings.Contains(v, ":") {
-			return nil, 0, ErrInvalidEndpointConfig
-		}
-
-		if strings.Contains(v, ",") {
-			endpointsAndPort := strings.Split(v, ",")
-			for _, val := range endpointsAndPort {
-				endpointAndPort := strings.Split(val, ":")
-				resultEndpoints = append(resultEndpoints, endpointAndPort[0])
-				resultPort, err = strconv.Atoi(endpointAndPort[1])
-				if err != nil {
-					return nil, 0, err
-				}
-			}
-
-		} else {
-			endpointAndPort := strings.Split(v, ":")
-			resultEndpoints = append(resultEndpoints, endpointAndPort[0])
-			resultPort, err = strconv.Atoi(endpointAndPort[1])
-			if err != nil {
-				return nil, 0, err
-			}
-		}
-	} else {
-		return nil, 0, ErrInvalidEndpointConfig
-	}
-
-	return resultEndpoints, resultPort, nil
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/doc.go b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/doc.go
deleted file mode 100644
index 0de3a1bbf5..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/doc.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cassandra is the implementation of the SQL Data Broker client
-// API for the Cassandra data store. See cn-infra/db/sql for the definition
-// of the key-value Data Broker client API.
-//
-// The entity that provides access to the data store is called gocql.Session (wrapped by Broker for convenience).
-//
-//    +--------+       +----------+ crud   +-----------+
-//    | Broker | ----> | Session  | ---->  | Cassandra |
-//    +--------+       +----------+        +-----------+
-//
-// To create a Session use the following function
-//
-//    import "github.com/gocql/gocql"
-//
-//    cluster := gocql.NewCluster("172.17.0.1")
-//    cluster.Keyspace = "demo"
-//    session, err := cluster.CreateSession()
-//
-// Then create the broker instance:
-//
-//    import (
-//        "github.com/ligato/cn-infra/db/sql/cassandra"
-//        "github.com/willfaught/gockle"
-//    )
-//    db := cassandra.NewBrokerUsingSession(gockle.NewSession(session))
-//
-// To insert a single key-value pair into Cassandra (both values are pointers; JamesBond is an instance of the User struct):
-//    db.Put(sql.PK(&JamesBond.ID), JamesBond)
-// To remove a value identified by key:
-//    db.Delete(sql.FROM(JamesBond, sql.WHERE(sql.PK(&JamesBond.ID))))
-//
-// To retrieve a value identified by key (both values are pointers):
-//    user := &User{}
-//    found, err := db.GetValue(sql.FROM(UserTable, sql.WHERE(sql.Field(&UserTable.ID, sql.EQ("James Bond")))), user)
-//    if err == nil && found {
-//        ...
-//    }
-//
-// To retrieve all values matching the query:
-//    itr := db.ListValues(sql.FROM(UserTable, sql.WHERE(sql.Field(&UserTable.LastName, sql.EQ("Bond")))))
-//    user := &User{}
-//    for {
-//        if stop := itr.GetNext(user); stop {
-//            break
-//        }
-//        // process user...
-//    }
-//    err := itr.Close()
-//
-// To retrieve values more conveniently directly into a slice (without using the iterator):
-//    users := &[]User{}
-//    err := sql.SliceIt(users, db.ListValues(sql.FROM(UserTable,
-//        sql.WHERE(sql.Field(&UserTable.LastName, sql.EQ("Bond"))))))
-//
-package cassandra
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/plugin_impl_cassa.go b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/plugin_impl_cassa.go
deleted file mode 100644
index 93b9c78fe6..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/plugin_impl_cassa.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cassandra
-
-import (
-	"errors"
-
-	"github.com/ligato/cn-infra/core"
-	"github.com/ligato/cn-infra/db/sql"
-	"github.com/ligato/cn-infra/flavors/local"
-	"github.com/ligato/cn-infra/health/statuscheck"
-	"github.com/ligato/cn-infra/utils/safeclose"
-	"github.com/willfaught/gockle"
-)
-
-const (
-	// probeCassandraConnection is the query used to check the state of the Cassandra connection.
-	probeCassandraConnection = "SELECT keyspace_name FROM system_schema.keyspaces"
-)
-
-// Plugin implements the Plugin interface, therefore it can be loaded with other plugins
-type Plugin struct {
-	Deps // inject
-
-	clientConfig *ClientConfig
-	session      gockle.Session
-}
-
-// Deps groups the injected dependencies of the plugin
-// so that they do not mix with other plugin fields.
-type Deps struct {
-	local.PluginInfraDeps // inject
-}
-
-var (
-	// ErrMissingVisitorEntity is the error returned when the visitor is missing the entity.
-	ErrMissingVisitorEntity = errors.New("cassandra: visitor is missing entity")
-
-	// ErrMissingEntityField is the error returned when the visitor entity is missing a field.
-	ErrMissingEntityField = errors.New("cassandra: visitor entity is missing field")
-
-	// ErrUnexportedEntityField is the error returned when the visitor entity has an unexported field.
-	ErrUnexportedEntityField = errors.New("cassandra: visitor entity with unexported field")
-
-	// ErrInvalidEndpointConfig is the error returned when the endpoint and port are not in a valid format.
-	ErrInvalidEndpointConfig = errors.New("cassandra: invalid configuration, endpoint and port not in valid format")
-)
-
-// Init is called at plugin startup. The session to Cassandra is established.
-func (p *Plugin) Init() (err error) {
-	if p.session != nil {
-		return nil // skip initialization
-	}
-
-	// Retrieve config
-	var cfg Config
-	found, err := p.PluginConfig.GetValue(&cfg)
-	// the plugin is skipped when the config is not present
-	if !found {
-		p.Log.Info("cassandra client config not found ", p.PluginConfig.GetConfigName(),
-			" - skip loading this plugin")
-		return nil
-	}
-	if err != nil {
-		return err
-	}
-
-	// Init session
-	p.clientConfig, err = ConfigToClientConfig(&cfg)
-	if err != nil {
-		return err
-	}
-
-	if p.session == nil && p.clientConfig != nil {
-		session, err := CreateSessionFromConfig(p.clientConfig)
-		if err != nil {
-			return err
-		}
-
-		p.session = gockle.NewSession(session)
-	}
-
-	// Register for providing status reports (polling mode)
-	if p.StatusCheck != nil {
-		if p.session != nil {
-			p.StatusCheck.Register(core.PluginName(p.String()), func() (statuscheck.PluginState, error) {
-				broker := p.NewBroker()
-				err := broker.Exec(probeCassandraConnection)
-				if err == nil {
-					return statuscheck.OK, nil
-				}
-				return statuscheck.Error, err
-			})
-		} else {
-			p.Log.Warnf("Cassandra connection not available")
-		}
-	} else {
-		p.Log.Warnf("Unable to start status check for Cassandra")
-	}
-
-	return nil
-}
-
-// AfterInit is called by the Agent Core after all plugins have been initialized.
-func (p *Plugin) AfterInit() error {
-	return nil
-}
-
-// FromExistingSession is used mainly for testing.
-func FromExistingSession(session gockle.Session) *Plugin {
-	return &Plugin{session: session}
-}
-
-// NewBroker returns a Broker instance to work with the Cassandra database
-func (p *Plugin) NewBroker() sql.Broker {
-	return NewBrokerUsingSession(p.session)
-}
-
-// Close releases the allocated resources
-func (p *Plugin) Close() error {
-	safeclose.Close(p.session)
-	return nil
-}
-
-// String returns Deps.PluginName if set, "cassa-client" otherwise
-func (p *Plugin) String() string {
-	if len(p.Deps.PluginName) == 0 {
-		return "cassa-client"
-	}
-	return string(p.Deps.PluginName)
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/query.go b/vendor/github.com/ligato/cn-infra/db/sql/cassandra/query.go
deleted file mode 100644
index 2b77d9ea5c..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/cassandra/query.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cassandra
-
-import (
-	"bytes"
-	"fmt"
-	r "reflect"
-	"strings"
-
-	"github.com/ligato/cn-infra/db/sql"
-	"github.com/ligato/cn-infra/utils/structs"
-)
-
-// PutExpToString converts expression to string & slice of bindings
-func PutExpToString(whereCondition sql.Expression, entity interface{}) (sqlStr string, bindings []interface{},
-	err error) {
-
-	whereCondtionStr := &toStringVisitor{entity: entity}
-	whereCondition.Accept(whereCondtionStr)
-
-	statement, _, err := updateSetExpToString(sql.EntityTableName(entity), /*TODO extract method / make customizable*/
-		entity /*, TODO TTL*/)
-	if err != nil {
-		return "", nil, err
-	}
-
-	_, bindings = structs.ListExportedFieldsPtrs(entity, cqlExported, filterOutPK)
-	whereBinding := whereCondtionStr.Binding()
-	if whereBinding != nil {
-		bindings = append(bindings, whereBinding...)
-	}
-
-	return strings.Trim(statement+" WHERE "+whereCondtionStr.String(), " "), bindings, nil
-}
-
-// SelectExpToString converts expression to string & slice of bindings
-func SelectExpToString(fromWhere sql.Expression) (sqlStr string, bindings []interface{},
-	err error) {
-
-	findEntity := &findEntityVisitor{}
-	fromWhere.Accept(findEntity)
-
-	fromWhereStr := &toStringVisitor{entity: findEntity.entity}
-	fromWhere.Accept(fromWhereStr)
-
-	fieldsStr := selectFields(findEntity.entity)
-	if err != nil {
-		return "", nil, err
-	}
-	fromWhereBindings := fromWhereStr.Binding()
-
-	whereStr := fromWhereStr.String()
-	if strings.Contains(whereStr, "AND") {
-		whereStr = whereStr + " ALLOW FILTERING"
-	}
-	return "SELECT " + fieldsStr + whereStr, fromWhereBindings, nil
-}
-
-// ExpToString converts expression to string & slice of bindings
-func ExpToString(exp sql.Expression) (sql string, bindings []interface{}, err error) {
-	findEntity := &findEntityVisitor{}
-	exp.Accept(findEntity)
-
-	stringer := &toStringVisitor{entity: findEntity.entity}
-	exp.Accept(stringer)
-
-	return stringer.String(), stringer.Binding(), stringer.lastError
-}
-
-type toStringVisitor struct {
-	entity    interface{}
-	generated bytes.Buffer
-	binding   []interface{}
-	lastError error
-}
-
-// String converts generated byte Buffer to string
-func (visitor *toStringVisitor) String() string {
-	return visitor.generated.String()
-}
-
-// Binding is a getter...
-func (visitor *toStringVisitor) Binding() []interface{} {
-	return visitor.binding
-}
-
-// VisitPrefixedExp generates part of SQL expression
-func (visitor *toStringVisitor) VisitPrefixedExp(exp *sql.PrefixedExp) {
-	if exp.Prefix == "FROM" {
-		visitor.generated.WriteString(" FROM ")
-		visitor.generated.WriteString(sql.EntityTableName(visitor.entity))
-	} else {
-		visitor.generated.WriteString(exp.Prefix)
-	}
-	if exp.AfterPrefix != nil {
-		for _, exp := range exp.AfterPrefix {
-			exp.Accept(visitor)
-		}
-	}
-	visitor.generated.WriteString(exp.Suffix)
-
-	if exp.Prefix != "FROM" && exp.Binding != nil && len(exp.Binding) > 0 {
-		if visitor.binding != nil {
-			visitor.binding = append(visitor.binding, exp.Binding...)
-		} else {
-			visitor.binding = exp.Binding
-		}
-	}
-}
-
-// VisitFieldExpression generates part of the SQL expression
-func (visitor *toStringVisitor) VisitFieldExpression(exp *sql.FieldExpression) {
-	if visitor.entity == nil {
-		visitor.lastError = ErrMissingVisitorEntity
-	} else {
-		field, found := structs.FindField(exp.PointerToAField, visitor.entity)
-		if !found {
-			visitor.lastError = ErrMissingEntityField
-			return
-		}
-		fieldName, found := fieldName(field)
-		if !found {
-			visitor.lastError = ErrUnexportedEntityField
-			return
-		}
-		//visitor.generated.WriteString(" ")
-		visitor.generated.WriteString(fieldName)
-
-		if exp.AfterField != nil {
-			exp.AfterField.Accept(visitor)
-		}
-	}
-}
-
-// cqlExported checks the cql tag in StructField and reports whether the field is exported
-func cqlExported(field *r.StructField) (exported bool) {
-	cql := field.Tag.Get("cql")
-	if len(cql) > 0 {
-		if cql == "-" {
-			return false
-		}
-		return true
-	}
-	return true
-}
-
-// cqlExportedWithFieldName checks the cql tag in StructField and parses the field name
-func cqlExportedWithFieldName(field *r.StructField) (fieldName string, exported bool) {
-	cql := field.Tag.Get("cql")
-	if len(cql) > 0 {
-		if cql == "-" {
-			return cql, false
-		}
-		return cql, true
-	}
-	return field.Name, true
-}
-
-// isFieldPK checks the pk tag in StructField and reports whether the field is a primary key
-func isFieldPK(field *r.StructField) (isPK bool) {
-	result := false
-	pk := field.Tag.Get("pk")
-	if len(pk) > 0 {
-		result = true
-	}
-	return result
-}
-
-// filterOutPK is used to filter out the primary key from update statements only
-func filterOutPK(field *r.StructField) (filterPK bool) {
-	return !isFieldPK(field)
-}
-
-func fieldName(field *r.StructField) (name string, exported bool) {
-	structExported := structs.FieldExported(field)
-	if !structExported {
-		return field.Name, structExported
-	}
-
-	return cqlExportedWithFieldName(field)
-}
-
-// selectFields generates comma separated field names string
-func selectFields(val interface{} /*, opts Options*/) (statement string) {
-	fields := structs.ListExportedFields(val, cqlExported)
-	ret := bytes.Buffer{}
-	first := true
-	for _, field := range fields {
-		fieldName, exported := fieldName(field)
-		if exported {
-			if first {
-				first = false
-			} else {
-				ret.WriteString(", ")
-			}
-
-			ret.WriteString(fieldName)
-		}
-	}
-
-	return ret.String()
-}
-
-// sliceOfFieldNames generates a slice of translated (cql tag) field names
-// used in creating update statements only
-func sliceOfFieldNames(val interface{} /*, opts Options*/) (fieldNames []string) {
-	fields := structs.ListExportedFields(val, filterOutPK)
-	fieldNames = []string{}
-	for _, field := range fields {
-		fieldName, exported := fieldName(field)
-		if exported {
-			fieldNames = append(fieldNames, fieldName)
-		}
-	}
-
-	return fieldNames
-}
-
-// SliceOfFieldsWithValPtrs generates a slice of translated (cql tag) field names with field values
-// used for unit testing purposes only - list_values test
-func SliceOfFieldsWithValPtrs(val interface{} /*, opts Options*/) (fieldNames []string, vals []interface{}) {
-	fields, vals := structs.ListExportedFieldsPtrs(val)
-
-	fieldNames = []string{}
-	for _, field := range fields {
-		fieldName, exported := fieldName(field)
-		if exported {
-			fieldNames = append(fieldNames, fieldName)
-		}
-	}
-
-	return fieldNames, vals
-}
-
-// updateSetExpToString generates the UPDATE + SET part of the SQL statement
-// for the fields of an entity
-func updateSetExpToString(cfName string, val interface{} /*, opts Options*/) (
-	statement string, fields []string, err error) {
-
-	fields = sliceOfFieldNames(val)
-
-	statement = updateStatement(cfName, fields)
-	return statement, fields, nil
-}
-
-// UPDATE keyspace.Movies SET col1 = val1, col2 = val2
-func updateStatement(cfName string, fields []string /*, opts Options*/) (statement string) {
-	buf := new(bytes.Buffer)
-	buf.WriteString(fmt.Sprintf("UPDATE %s ", cfName))
-
-	/*
-		// Apply options
-		if opts.TTL != 0 {
-			buf.WriteString("USING TTL ")
-			buf.WriteString(strconv.FormatFloat(opts.TTL.Seconds(), 'f', 0, 64))
-			buf.WriteRune(' ')
-		}*/
-
-	buf.WriteString("SET ")
-	first := true
-	for _, fieldName := range fields {
-		if !first {
-			buf.WriteString(", ")
-		} else {
-			first = false
-		}
-		buf.WriteString(fieldName)
-		buf.WriteString(` = ?`)
-	}
-
-	return buf.String()
-}
-
-type findEntityVisitor struct {
-	entity interface{}
-}
-
-// VisitPrefixedExp checks for the "FROM" expression to find out the entity
-func (visitor *findEntityVisitor) VisitPrefixedExp(exp *sql.PrefixedExp) {
-	if exp.Prefix == "FROM" {
-		if len(exp.Binding) == 1 && r.Indirect(r.ValueOf(exp.Binding[0])).Kind() == r.Struct {
-			visitor.entity = exp.Binding[0]
-		}
-	} else if exp.AfterPrefix != nil {
-		for _, exp := range exp.AfterPrefix {
-			exp.Accept(visitor)
-		}
-	}
-}
-
-// VisitFieldExpression just propagates to AfterField
-func (visitor *findEntityVisitor) VisitFieldExpression(exp *sql.FieldExpression) {
-	if exp.AfterField != nil {
-		exp.AfterField.Accept(visitor)
-	}
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/doc.go b/vendor/github.com/ligato/cn-infra/db/sql/doc.go
deleted file mode 100644
index 9c9a1d8232..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package sql provides an abstraction of a data store that supports an
-// SQL-like query language and defines the SQL data broker API. The SQL
-// data broker API consists of the Broker and KeyValProtoWatcher APIs
-// for accessing data in an SQL data store.
-package sql
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/slice_utils.go b/vendor/github.com/ligato/cn-infra/db/sql/slice_utils.go
deleted file mode 100644
index 0efd76c188..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/slice_utils.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sql
-
-import (
-	"reflect"
-
-	"github.com/ligato/cn-infra/utils/safeclose"
-)
-
-// SliceIt reads everything from the ValIterator and stores it to pointerToASlice.
-// It closes the iterator (since nothing is left in the iterator).
-func SliceIt(pointerToASlice interface{}, it ValIterator) error {
-	/* TODO defer func() {
-		if exp := recover(); exp != nil && it != nil {
-			logger.Error(exp)
-			exp = safeclose.Close(it)
-			if exp != nil {
-				logger.Error(exp)
-			}
-		}
-	}()*/
-
-	sl := reflect.ValueOf(pointerToASlice)
-	if sl.Kind() == reflect.Ptr {
-		sl = sl.Elem()
-	} else {
-		panic("must be pointer")
-	}
-
-	if sl.Kind() != reflect.Slice {
-		panic("must be slice")
-	}
-
-	sliceType := sl.Type()
-
-	sliceElemType := sliceType.Elem()
-	sliceElemPtr := sliceElemType.Kind() == reflect.Ptr
-	if sliceElemPtr {
-		sliceElemType = sliceElemType.Elem()
-	}
-	for {
-		row := reflect.New(sliceElemType)
-		if stop := it.GetNext(row.Interface()); stop {
-			break
-		}
-
-		if sliceElemPtr {
-			sl.Set(reflect.Append(sl, row))
-		} else {
-			sl.Set(reflect.Append(sl, row.Elem()))
-		}
-	}
-
-	return safeclose.Close(it)
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/sql_broker_api.go b/vendor/github.com/ligato/cn-infra/db/sql/sql_broker_api.go
deleted file mode 100644
index dbd9901f8b..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/sql_broker_api.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sql
-
-import (
-	"io"
-)
-
-// Broker executes SQL statements in the data store.
-// It marshals/un-marshals go structures.
-type Broker interface {
-	// Put puts a single value into the data store.
-	// Example usage:
-	//
-	//    err = db.Put(sql.Exp("ID='James Bond'"), &User{"James Bond", "James", "Bond"})
-	//
-	Put(where Expression, inBinding interface{} /* TODO opts ...PutOption*/) error
-
-	// NewTxn creates a transaction / batch.
-	NewTxn() Txn
-
-	// GetValue retrieves one item based on the query. If the item exists,
-	// it is un-marshaled into the outBinding.
-	//
-	// Example usage 1:
-	//
-	//    query := sql.FROM(UserTable, sql.WHERE(sql.Field(&UserTable.ID, sql.EQ("Bond"))))
-	//    user := &User{}
-	//    found, err := db.GetValue(query, user)
-	//
-	// Example usage 2:
-	//
-	//    query := sql.FROM(JamesBond, sql.WHERE(sql.PK(&JamesBond.ID)))
-	//    user := &User{}
-	//    found, err := db.GetValue(query, user)
-	//
-	GetValue(query Expression, outBinding interface{}) (found bool, err error)
-
-	// ListValues returns an iterator that enables traversing all items
-	// returned by the query.
-	// Use the utilities to:
-	// - generate the query string
-	// - fill a slice with the values from the iterator (SliceIt).
-	//
-	// Example usage 1 (fill a slice with values from the iterator):
-	//
-	//    query := sql.FROM(UserTable, sql.WHERE(sql.Field(&UserTable.LastName, sql.EQ("Bond"))))
-	//    iterator := db.ListValues(query)
-	//    users := &[]User{}
-	//    err := sql.SliceIt(users, iterator)
-	//
-	// Example usage 2:
-	//
-	//    query := sql.FROM(UserTable, sql.WHERE(sql.Exp("last_name='Bond'")))
-	//    iterator := db.ListValues(query)
-	//    users := &[]User{}
-	//    err := sql.SliceIt(users, iterator)
-	//
-	// Example usage 3:
-	//
-	//    iterator := db.ListValues(sql.Exp("select ID, first_name, last_name from User where last_name='Bond'"))
-	//    user := map[string]interface{}{}
-	//    stop := iterator.GetNext(user)
-	//
-	ListValues(query Expression) ValIterator
-
-	// Delete removes data from the data store.
-	// Example usage 1:
-	//
-	//    query := sql.FROM(JamesBond, sql.WHERE(sql.PK(&JamesBond.ID)))
-	//    err := db.Delete(query)
-	//
-	// Example usage 2:
-	//
-	//    err := db.Delete(sql.Exp("from User where ID='James Bond'"))
-	//
-	// Example usage 3:
-	//
-	//    query := sql.FROM(UserTable, sql.WHERE(sql.Field(&UserTable.LastName, sql.EQ("Bond"))))
-	//    err := db.Delete(query)
-	//
-	Delete(fromWhere Expression) error
-
-	// Exec executes the SQL statement (it can be used, for example, to create
-	// a "table/type" if it does not exist...)
-	// Example usage:
-	//
-	//    err := db.Exec("CREATE INDEX IF NOT EXISTS...")
-	Exec(statement string, bindings ...interface{}) error
-}
-
-// ValIterator is an iterator returned by the ListValues call.
-type ValIterator interface {
-	// GetNext retrieves the current "row" from the query result.
-	// The value is un-marshaled into the provided argument.
-	// The stop=true will be returned if there is no more record or if an error
-	// occurred (to get the error call Close()).
-	// When the stop=true is returned, the outBinding was not updated.
-	GetNext(outBinding interface{}) (stop bool)
-
-	// Closer retrieves an error (if one occurred) and releases the cursor.
-	io.Closer
-}
-
-// Txn allows grouping operations into a transaction or batch
-// (depending on the particular data store).
-// A transaction usually executes multiple operations in a more efficient way
-// than executing them one by one.
-type Txn interface {
-	// Put adds a put operation into the transaction.
-	Put(where Expression, data interface{}) Txn
-	// Delete adds a delete operation into the transaction.
-	Delete(fromWhere Expression) Txn
-	// Commit tries to commit the transaction.
-	Commit() error
-}
diff --git a/vendor/github.com/ligato/cn-infra/db/sql/sql_expression.go b/vendor/github.com/ligato/cn-infra/db/sql/sql_expression.go
deleted file mode 100644
index d930c10e94..0000000000
--- a/vendor/github.com/ligato/cn-infra/db/sql/sql_expression.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sql
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-)
-
-// Expression represents a part of an SQL statement with optional bindings ("?").
-type Expression interface {
-	// String prints the default representation of the SQL to a string.
-	// Different implementations can override this using the package specific
-	// func ExpToString().
-	String() string
-
-	// GetBinding returns the values referenced ("?") from the statement.
-	GetBinding() []interface{}
-
-	// Accept calls the methods on Visitor.
-	Accept(Visitor)
-}
-
-// Visitor is used for traversing an expression tree.
-type Visitor interface {
-	VisitPrefixedExp(*PrefixedExp)
-	VisitFieldExpression(*FieldExpression)
-}
-
-// PrefixedExp covers many SQL constructions. It implements the sql.Expression
-// interface. An instance of this structure is returned by many of the helper
-// functions below.
-type PrefixedExp struct {
-	Prefix      string
-	AfterPrefix []Expression
-	Suffix      string
-	Binding     []interface{}
-}
-
-// String returns Prefix + " " + AfterPrefix.
-func (exp *PrefixedExp) String() string {
-	if exp.AfterPrefix == nil {
-		return exp.Prefix
-	}
-
-	if exp.Prefix == "FROM" && len(exp.Binding) > 0 {
-		return exp.Prefix + " " + EntityTableName(exp.Binding[0]) + " " + ExpsToString(exp.AfterPrefix)
-	}
-
-	return exp.Prefix + " " + ExpsToString(exp.AfterPrefix)
-}
-
-// ExpsToString joins (without separator) individual expression string representations.
-func ExpsToString(exps []Expression) string {
-	if exps != nil {
-		if len(exps) == 1 {
-			return exps[0].String()
-		}
-
-		var buffer bytes.Buffer
-		for _, exp := range exps {
-			buffer.WriteString(exp.String())
-		}
-
-		return buffer.String()
-	}
-	return ""
-}
-
-// GetBinding is a getter.
-func (exp *PrefixedExp) GetBinding() []interface{} {
-	return exp.Binding
-}
-
-// Accept calls VisitPrefixedExp(...) & Accept(AfterPrefix).
-func (exp *PrefixedExp) Accept(visitor Visitor) {
-	visitor.VisitPrefixedExp(exp)
-}
-
-// FieldExpression is used for addressing a field of an entity in an SQL expression.
-type FieldExpression struct {
-	PointerToAField interface{}
-	AfterField      Expression
-}
-
-// String returns the default string representation of the field expression
-// (the field name itself is resolved by the visitor).
-func (exp *FieldExpression) String() string {
-	prefix := fmt.Sprint("")
-	if exp.AfterField == nil {
-		return prefix
-	}
-	return prefix + " " + exp.AfterField.String()
-}
-
-// GetBinding is a getter.
-func (exp *FieldExpression) GetBinding() []interface{} {
-	return nil
-}
-
-// Accept calls VisitFieldExpression(...) & Accept(AfterField).
-func (exp *FieldExpression) Accept(visitor Visitor) {
-	visitor.VisitFieldExpression(exp)
-}
-
-// SELECT keyword of an SQL expression.
-func SELECT(entity interface{}, afterKeyword Expression, binding ...interface{}) Expression {
-	return &PrefixedExp{"SELECT", []Expression{FROM(entity, afterKeyword)}, "", binding}
-}
-
-// FROM keyword of an SQL expression.
-// Note, pointerToAStruct is assigned to Expression.binding.
-// The implementation is supposed to try to cast to the sql.TableName & sql.SchemaName.
-func FROM(pointerToAStruct interface{}, afterKeyword Expression) Expression {
-	return &PrefixedExp{"FROM", []Expression{afterKeyword}, "", []interface{}{pointerToAStruct}}
-}
-
-// WHERE keyword of an SQL statement.
-func WHERE(afterKeyword ...Expression) Expression {
-	return &PrefixedExp{" WHERE ", afterKeyword, "", nil}
-}
-
-// DELETE keyword of an SQL statement.
-func DELETE(entity interface{}, afterKeyword Expression) Expression {
-	return &PrefixedExp{"DELETE", []Expression{afterKeyword}, "", nil}
-}
-
-// Exp creates an instance of sql.Expression from a string statement & optional bindings.
-// Useful for: -// - rarely used parts of an SQL statements -// - CREATE IF NOT EXISTS statements -func Exp(statement string, binding ...interface{}) Expression { - return &PrefixedExp{statement, nil, "", binding} -} - -var emptyAND = &PrefixedExp{" AND ", nil, "", nil} - -// AND keyword of SQL expression -// -// Example usage (alternative 1 - spare sequence of partenthesis): -// -// WHERE(FieldEQ(&JamesBond.FirstName), AND(), FieldEQ(&JamesBond.LastName)) -// -// Example usage (alternative 2 - useful for nesting): -// -// WHERE(AND(FieldEQ(&JamesBond.FirstName), FieldEQ(&JamesBond.LastName))) -func AND(inside ...Expression) Expression { - return intelligentOperator(emptyAND, inside...) -} - -var emptyOR = &PrefixedExp{" OR ", nil, "", nil} - -// OR keyword of SQL expression -// -// Example usage 1 (generated string does not contain parenthesis surrounding OR): -// -// WHERE(FieldEQ(&PeterBond.FirstName), OR(), FieldEQ(&JamesBond.FirstName)) -// -// Example usage 2 (generated string does not contain parenthesis surrounding OR): -// -// WHERE(FieldEQ(&PeterBond.LastName), OR(FieldEQ(&PeterBond.FirstName), FieldEQ(&JamesBond.FirstName))) -// -func OR(inside ...Expression) Expression { - return intelligentOperator(emptyOR, inside...) -} - -func intelligentOperator(emptyOperator *PrefixedExp, inside ...Expression) Expression { - lenInside := len(inside) - if lenInside == 0 { - return emptyOperator - } - if lenInside == 1 { - return &PrefixedExp{emptyOperator.Prefix, inside, "", nil} - } - inside2 := []Expression{} - for i, exp := range inside { - if i > 0 { - inside2 = append(inside2, emptyOperator, exp) - } else { - inside2 = append(inside2, exp) - } - } - - return Parenthesis(inside2...) -} - -// Field is a helper function to address field of a structure. -// -// Example usage: -// Where(Field(&UsersTable.LastName, UsersTable, EQ('Bond')) -// // generates, for example, "WHERE last_name='Bond'" -func Field(pointerToAField interface{}, rigthOperand ...Expression) (exp Expression) { - if len(rigthOperand) == 0 { - return &FieldExpression{pointerToAField, nil} - } - return &FieldExpression{pointerToAField, rigthOperand[0]} -} - -// FieldEQ is combination of Field & EQ on the same pointerToAField. 
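Composed together, the helpers above build an expression tree; a small sketch, with `UserTable` again standing for a pointer to a table-mapped struct instance:

    where := sql.WHERE(sql.AND(
        sql.FieldEQ(&UserTable.LastName),
        sql.OR(
            sql.Field(&UserTable.FirstName, sql.EQ("James")),
            sql.Field(&UserTable.FirstName, sql.EQ("Peter")),
        ),
    ))
    query := sql.FROM(UserTable, where)

    // The default String() renders only the statement skeleton with "?"
    // placeholders; a concrete backend (e.g. the cassandra plugin) walks
    // the tree via the Visitor to fill in column names and collect bindings.
    fmt.Println(query.String())
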
-// -// Example usage: -// FROM(JamesBond, Where(FieldEQ(&JamesBond.LastName)) -// // generates, for example, "WHERE last_name='Bond'" -// // because JamesBond is a pointer to an instance of a structure that in field LastName contains "Bond" -func FieldEQ(pointerToAField interface{}) (exp Expression) { - return &FieldExpression{pointerToAField, EQ(pointerToAField)} -} - -// PK is alias FieldEQ (user for better readability) -// -// Example usage: -// FROM(JamesBond, Where(PK(&JamesBond.LastName)) -// // generates, for example, "WHERE last_name='Bond'" -// // because JamesBond is a pointer to an instance of a structure that in field LastName contains "Bond" -func PK(pointerToAField interface{}) (exp Expression) { - return FieldEQ(pointerToAField) -} - -// EQ operator "=" used in SQL expressions -func EQ(binding interface{}) (exp Expression) { - return &PrefixedExp{" = ", []Expression{Exp("?", binding)}, "", nil} -} - -// GT operator ">" used in SQL expressions -func GT(binding interface{}) (exp Expression) { - return &PrefixedExp{" > ", []Expression{Exp("?", binding)}, "", nil} -} - -// GTE operator "=>" used in SQL expressions -func GTE(binding interface{}) (exp Expression) { - return &PrefixedExp{" => ", []Expression{Exp("?", binding)}, "", nil} -} - -// LT operator "<" used in SQL expressions -func LT(binding interface{}) (exp Expression) { - return &PrefixedExp{" < ", []Expression{Exp("?", binding)}, "", nil} -} - -// LTE operator "=<" used in SQL expressions -func LTE(binding interface{}) (exp Expression) { - return &PrefixedExp{" =< ", []Expression{Exp("?", binding)}, "", nil} -} - -// Parenthesis expression that surrounds "inside Expression" with "(" and ")" -func Parenthesis(inside ...Expression) (exp Expression) { - return &PrefixedExp{"(", inside, ")", nil} -} - -// IN operator of SQL expression -// FROM(UserTable,WHERE(FieldEQ(&UserTable.FirstName, IN(JamesBond.FirstName, PeterBond.FirstName))) -func IN(binding ...interface{}) (exp Expression) { - bindingRefs := strings.Repeat(",?", len(binding)) - return &PrefixedExp{" IN(", nil, bindingRefs[1:] + ")", binding} -} diff --git a/vendor/github.com/ligato/cn-infra/db/sql/sql_struct_metadata.go b/vendor/github.com/ligato/cn-infra/db/sql/sql_struct_metadata.go deleted file mode 100644 index 1087fff925..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/sql/sql_struct_metadata.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sql - -import "reflect" - -// TableName interface specifies custom table name for SQL statements. -type TableName interface { - // TableName returns sql table name. - TableName() string -} - -// SchemaName interface specifies custom schema name for SQL statements. -type SchemaName interface { - // SchemaName returns sql schema name where the table resides - SchemaName() string -} - -// EntityTableName returns the table name, possibly prefixed with the schema -// name, associated with the . 
-// The function tries to cast to TableName and SchemaName in order to -// obtain the table name and the schema name, respectively. -// If table name cannot be obtained, the struct name is used instead. -// If schema name cannot be obtained, it is simply omitted from the result. -func EntityTableName(entity interface{}) string { - var tableName, schemaName string - if nameProvider, ok := entity.(TableName); ok { - tableName = nameProvider.TableName() - } - - if tableName == "" { - tableName = reflect.Indirect(reflect.ValueOf(entity)).Type().Name() - } - - if schemaNameProvider, ok := entity.(SchemaName); ok { - schemaName = schemaNameProvider.SchemaName() - } - - if schemaName == "" { - return tableName - } - - return schemaName + "." + tableName -} diff --git a/vendor/github.com/ligato/cn-infra/db/sql/sql_watcher_api.go b/vendor/github.com/ligato/cn-infra/db/sql/sql_watcher_api.go deleted file mode 100644 index 68ecf4d221..0000000000 --- a/vendor/github.com/ligato/cn-infra/db/sql/sql_watcher_api.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sql - -import "github.com/ligato/cn-infra/datasync" - -// Watcher defines API for monitoring changes in a datastore. -type Watcher interface { - // Watch starts to monitor changes in a data store. - // Watch events will be delivered to the . - Watch(callback func(WatchResp), statement ...string) error -} - -// WatchResp represents a notification about change. -// It is passed to the Watch callback. -type WatchResp interface { - // GetChangeType returns the type of the change. - GetChangeType() datasync.PutDel - - // GetValue returns the changed value. - GetValue(outBinding interface{}) error -} - -// ToChan TODO (not implemented yet) -func ToChan(respChan chan WatchResp, options ...interface{}) func(event WatchResp) { - return func(WatchResp) { - /*select { - case respChan <- resp: - case <-time.After(defaultOpTimeout): - log.Warn("Unable to deliver watch event before timeout.") - } - - select { - case wresp := <-recvChan: - for _, ev := range wresp.Events { - handleWatchEvent(respChan, ev) - } - case <-closeCh: - log.WithField("key", key).Debug("Watch ended") - return - }*/ - } -} diff --git a/vendor/github.com/ligato/cn-infra/flavors/connectors/all_connectors_flavor.go b/vendor/github.com/ligato/cn-infra/flavors/connectors/all_connectors_flavor.go deleted file mode 100644 index d0350b174c..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/connectors/all_connectors_flavor.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package connectors - -import ( - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync" - "github.com/ligato/cn-infra/datasync/resync" - "github.com/ligato/cn-infra/db/keyval/consul" - "github.com/ligato/cn-infra/db/keyval/etcd" - "github.com/ligato/cn-infra/db/keyval/redis" - "github.com/ligato/cn-infra/db/sql/cassandra" - "github.com/ligato/cn-infra/flavors/local" - "github.com/ligato/cn-infra/messaging/kafka" -) - -// NewAgent returns a new instance of the Agent with plugins. -// It is an alias for core.NewAgent() to implicit use of the FlavorLocal. -func NewAgent(opts ...core.Option) *core.Agent { - return core.NewAgent(&AllConnectorsFlavor{}, opts...) -} - -// WithPlugins for adding custom plugins to SFC Controller -// is a callback that uses flavor input to -// inject dependencies for custom plugins that are in output -// -// Example: -// -// NewAgent(connectors.WithPlugins(func(flavor) { -// return []*core.NamedPlugin{{"my-plugin", &MyPlugin{DependencyXY: &flavor.ETCD}}} -// })) -func WithPlugins(listPlugins func(local *AllConnectorsFlavor) []*core.NamedPlugin) core.WithPluginsOpt { - return &withPluginsOpt{listPlugins} -} - -// AllConnectorsFlavor is a combination of all plugins that allow -// connectivity to external database/messaging... -// Effectively it is combination of ETCD, Kafka, Redis, Cassandra -// plugins. -// -// User/admin can enable those plugins/connectors by providing -// configs (at least endpoints) for them. -type AllConnectorsFlavor struct { - *local.FlavorLocal - - ETCD etcd.Plugin - ETCDDataSync kvdbsync.Plugin - - Consul consul.Plugin - ConsulDataSync kvdbsync.Plugin - - Kafka kafka.Plugin - - Redis redis.Plugin - RedisDataSync kvdbsync.Plugin - - Cassandra cassandra.Plugin - - ResyncOrch resync.Plugin // the order is important because of AfterInit() - - injected bool -} - -// Inject initializes flavor references/dependencies. 
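For context, the consumption pattern this flavor was built for, following the WithPlugins contract documented above; `MyPlugin` is hypothetical, and `core.EventLoopWithInterrupt` is the usual blocking runner of this cn-infra generation:

    package main

    import (
        "github.com/ligato/cn-infra/core"
        "github.com/ligato/cn-infra/db/keyval"
        "github.com/ligato/cn-infra/flavors/connectors"
    )

    // MyPlugin is a hypothetical plugin that gets the ETCD connection injected.
    type MyPlugin struct {
        KvStore keyval.KvProtoPlugin
    }

    func (p *MyPlugin) Init() error  { return nil }
    func (p *MyPlugin) Close() error { return nil }

    func main() {
        agent := connectors.NewAgent(connectors.WithPlugins(
            func(flavor *connectors.AllConnectorsFlavor) []*core.NamedPlugin {
                return []*core.NamedPlugin{{"my-plugin", &MyPlugin{KvStore: &flavor.ETCD}}}
            }))
        core.EventLoopWithInterrupt(agent, nil)
    }
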
-func (f *AllConnectorsFlavor) Inject() bool { - if f.injected { - return false - } - f.injected = true - - if f.FlavorLocal == nil { - f.FlavorLocal = &local.FlavorLocal{} - } - f.FlavorLocal.Inject() - - f.Consul.Deps.PluginInfraDeps = *f.InfraDeps("consul", local.WithConf()) - f.Consul.Deps.Resync = &f.ResyncOrch - InjectKVDBSync(&f.ConsulDataSync, &f.Consul, f.Consul.PluginName, f.FlavorLocal, &f.ResyncOrch) - - f.ETCD.Deps.PluginInfraDeps = *f.InfraDeps("etcd", local.WithConf()) - f.ETCD.Deps.Resync = &f.ResyncOrch - InjectKVDBSync(&f.ETCDDataSync, &f.ETCD, f.ETCD.PluginName, f.FlavorLocal, &f.ResyncOrch) - - f.FlavorLocal.StatusCheck.Transport = &datasync.CompositeKVProtoWriter{Adapters: []datasync.KeyProtoValWriter{ - &f.ETCDDataSync, - &f.ConsulDataSync, - }} - - f.Redis.Deps.PluginInfraDeps = *f.InfraDeps("redis", local.WithConf()) - InjectKVDBSync(&f.RedisDataSync, &f.Redis, f.Redis.PluginName, f.FlavorLocal, &f.ResyncOrch) - - f.Kafka.Deps.PluginInfraDeps = *f.InfraDeps("kafka", local.WithConf()) - - f.Cassandra.Deps.PluginInfraDeps = *f.InfraDeps("cassandra", local.WithConf()) - - f.ResyncOrch.PluginLogDeps = *f.LogDeps("resync-orch") - - return true -} - -// Plugins combines all Plugins in flavor to the list -func (f *AllConnectorsFlavor) Plugins() []*core.NamedPlugin { - f.Inject() - return core.ListPluginsInFlavor(f) -} - -// withPluginsOpt is return value of connectors.WithPlugins() utility -// to easily define new plugins for the agent based on LocalFlavor. -type withPluginsOpt struct { - callback func(local *AllConnectorsFlavor) []*core.NamedPlugin -} - -// OptionMarkerCore is just for marking implementation that it implements this interface -func (opt *withPluginsOpt) OptionMarkerCore() {} - -// Plugins methods is here to implement core.WithPluginsOpt go interface -// is a callback that uses flavor input for dependency injection -// for custom plugins (returned as NamedPlugin) -func (opt *withPluginsOpt) Plugins(flavors ...core.Flavor) []*core.NamedPlugin { - for _, flavor := range flavors { - if f, ok := flavor.(*AllConnectorsFlavor); ok { - return opt.callback(f) - } - } - - panic("wrong usage of connectors.WithPlugin() for other than AllConnectorsFlavor") -} diff --git a/vendor/github.com/ligato/cn-infra/flavors/connectors/connectors_util.go b/vendor/github.com/ligato/cn-infra/flavors/connectors/connectors_util.go deleted file mode 100644 index 3615b8d5f1..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/connectors/connectors_util.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package connectors - -import ( - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/datasync/kvdbsync" - "github.com/ligato/cn-infra/datasync/resync" - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/flavors/local" -) - -// InjectKVDBSync sets kvdbsync.Plugin dependencies. 
-// The intent of this method is just extract code that would be copy&pasted otherwise. -func InjectKVDBSync(dbsync *kvdbsync.Plugin, - db keyval.KvProtoPlugin, dbPlugName core.PluginName, local *local.FlavorLocal, resync resync.Subscriber) { - - dbsync.Deps.PluginLogDeps = *local.LogDeps(string(dbPlugName) + "-datasync") - dbsync.KvPlugin = db - dbsync.ResyncOrch = resync - if local != nil { - //Note, not injecting local.ETCDDataSync.ResyncOrch here - - dbsync.ServiceLabel = &local.ServiceLabel - } -} diff --git a/vendor/github.com/ligato/cn-infra/flavors/connectors/doc.go b/vendor/github.com/ligato/cn-infra/flavors/connectors/doc.go deleted file mode 100644 index 34b13e1654..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/connectors/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package connectors defines "Connectors" flavor, which is a combination -// of all plugins that allow connectivity to external database/messaging. -package connectors diff --git a/vendor/github.com/ligato/cn-infra/flavors/local/doc.go b/vendor/github.com/ligato/cn-infra/flavors/local/doc.go deleted file mode 100644 index 199f320a20..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/local/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package local defines "Local" flavor, which combines logging with StatusCheck. -package local diff --git a/vendor/github.com/ligato/cn-infra/flavors/local/local_flavor.go b/vendor/github.com/ligato/cn-infra/flavors/local/local_flavor.go deleted file mode 100644 index a85d891b6d..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/local/local_flavor.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package local - -import ( - "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/health/statuscheck" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logmanager" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/servicelabel" - "github.com/namsral/flag" -) - -// LogsFlagDefault - default file name -const LogsFlagDefault = "logs.conf" - -// LogsFlagUsage used as flag usage (see implementation in declareFlags()) -const LogsFlagUsage = "Location of the configuration files; also set via 'LOGS_CONFIG' env variable." - -// NewAgent returns a new instance of the Agent with plugins. -// It is an alias for core.NewAgent() to implicit use of the FlavorLocal. -// -// Example: -// -// local.NewAgent(local.WithPlugins(func(flavor *FlavorLocal) { -// return []*core.NamedPlugin{{"my-plugin", &MyPlugin{DependencyXY: &flavor.StatusCheck}}} -// })) -func NewAgent(opts ...core.Option) *core.Agent { - return core.NewAgent(&FlavorLocal{}, opts...) -} - -// WithPlugins for adding custom plugins to SFC Controller -// is a callback that uses flavor input to -// inject dependencies for custom plugins that are in output. 
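Under that helper, a hypothetical composite flavor wires one more KV connector in two lines, mirroring the ETCD/Consul wiring shown earlier; `f`, `KV`, `KVDataSync` and `ResyncOrch` are illustrative names:

    // Inside a custom flavor's Inject(), with f embedding *local.FlavorLocal,
    // a resync.Plugin (ResyncOrch), a KV plugin (KV) and its datasync
    // adapter (KVDataSync):
    f.KV.Deps.PluginInfraDeps = *f.InfraDeps("my-kv", local.WithConf())
    connectors.InjectKVDBSync(&f.KVDataSync, &f.KV, f.KV.PluginName, f.FlavorLocal, &f.ResyncOrch)
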
-// -// Use this option either for core.NewAgent() or local.NewAgent() -// -// Example: -// -// NewAgent(local.WithPlugins(func(flavor) { -// return []*core.NamedPlugin{{"my-plugin", &MyPlugin{DependencyXY: &flavor.StatusCheck}}} -// })) -func WithPlugins(listPlugins func(local *FlavorLocal) []*core.NamedPlugin) core.WithPluginsOpt { - return &withPluginsOpt{listPlugins} -} - -// FlavorLocal glues together very minimal subset of cn-infra plugins -// that can be embedded inside different projects without running -// any agent specific server. -type FlavorLocal struct { - logRegistry logging.Registry - Logs logmanager.Plugin //needs to be first plugin (it updates log level from config) - ServiceLabel servicelabel.Plugin - StatusCheck statuscheck.Plugin - - injected bool -} - -// Inject injects logger into StatusCheck. -// Composite flavors embedding local flavor are supposed to call this -// method. -// Method returns in case the injection has been already executed. -func (f *FlavorLocal) Inject() bool { - if f.injected { - return false - } - f.injected = true - - declareFlags() - - f.Logs.Deps.LogRegistry = f.LogRegistry() - f.Logs.Deps.Log = f.LoggerFor("logs") - f.Logs.Deps.PluginName = core.PluginName("logs") - f.Logs.Deps.PluginConfig = config.ForPlugin("logs", LogsFlagDefault, LogsFlagUsage) - - f.StatusCheck.Deps.Log = f.LoggerFor("status-check") - f.StatusCheck.Deps.PluginName = core.PluginName("status-check") - - return true -} - -// Plugins combines all Plugins in flavor to the list -func (f *FlavorLocal) Plugins() []*core.NamedPlugin { - f.Inject() - return core.ListPluginsInFlavor(f) -} - -// LogRegistry for getting Logging Registry instance -// (not thread safe) -func (f *FlavorLocal) LogRegistry() logging.Registry { - if f.logRegistry == nil { - f.logRegistry = logrus.NewLogRegistry() - } - - return f.logRegistry -} - -// LoggerFor for getting PlugginLogger instance: -// - logger name is pre-initialized (see logging.ForPlugin) -// This method is just convenient shortcut for Flavor.Inject() -func (f *FlavorLocal) LoggerFor(pluginName string) logging.PluginLogger { - return logging.ForPlugin(pluginName, f.LogRegistry()) -} - -// LogDeps is a helper method for injecting PluginLogDeps dependencies with -// plugins from the Local flavor. -// argument value is injected as the plugin name. -// Injected logger uses the same name as the plugin (see logging.ForPlugin) -// This method is just a convenient shortcut to be used in Flavor.Inject() -// by flavors that embed the LocalFlavor. -func (f *FlavorLocal) LogDeps(pluginName string) *PluginLogDeps { - return &PluginLogDeps{ - logging.ForPlugin(pluginName, f.LogRegistry()), - core.PluginName(pluginName)} - -} - -// InfraDeps is a helper method for injecting PluginInfraDeps dependencies with -// plugins from the Local flavor. -// argument value is injected as the plugin name. -// Logging dependencies are resolved using the LogDeps() method. -// Plugin configuration file name is derived from the plugin name, -// see PluginConfig.GetConfigName(). -// This method is just a convenient shortcut to be used in Flavor.Inject() -// by flavors that embed the LocalFlavor.. 
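The three helpers compose as in this sketch, again inside a flavor's Inject(); plugin names are illustrative:

    // Standard infra deps for a plugin, with its config file flag declared
    // per the plugin name (see PluginConfig.GetConfigName()):
    deps := f.InfraDeps("my-plugin", local.WithConf())

    // Just logging deps, for lightweight plugins:
    logDeps := f.LogDeps("my-logger-only-plugin")

    // A prefixed child logger sharing the flavor's registry:
    workerLog := f.LoggerFor("my-plugin-worker")
    workerLog.Debug("child logger ready")
    _, _ = deps, logDeps
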
-func (f *FlavorLocal) InfraDeps(pluginName string, opts ...InfraDepsOpts) *PluginInfraDeps { - if len(opts) == 1 { - if confOpt, ok := opts[0].(*ConfOpts); ok { - return &PluginInfraDeps{ - *f.LogDeps(pluginName), - config.ForPlugin(pluginName, confOpt.confDefault, confOpt.confUsage), - &f.StatusCheck, - &f.ServiceLabel} - } - } - - return &PluginInfraDeps{ - *f.LogDeps(pluginName), - config.ForPlugin(pluginName), - &f.StatusCheck, - &f.ServiceLabel} -} - -// InfraDepsOpts is to make typesafe the InfraDeps varargs -type InfraDepsOpts interface { - // InfraDepsOpts method is maker to declare implementation of InfraDepsOpts interface - InfraDepsOpts() -} - -// WithConf is a function to create option for InfraDeps() -// no need to pass opts (used for defining flag if it was not already defined), if so in this order: -// - default value -// - usage -func WithConf(deafultUsageOpts ...string) *ConfOpts { - if len(deafultUsageOpts) > 1 { - return &ConfOpts{deafultUsageOpts[0], deafultUsageOpts[1]} - } else if len(deafultUsageOpts) > 0 { - return &ConfOpts{deafultUsageOpts[0], ""} - } - - return &ConfOpts{} -} - -// ConfOpts is a structure that holds default value & usage for configuration flag -type ConfOpts struct { - confDefault, confUsage string -} - -// InfraDepsOpts method is maker to declare implementation of InfraDepsOpts interface -func (*ConfOpts) InfraDepsOpts() {} - -func declareFlags() { - if flag.Lookup(config.DirFlag) == nil { - flag.String(config.DirFlag, config.DirDefault, config.DirUsage) - } -} - -// withPluginsOpt is return value of local.WithPlugins() utility -// to easily define new plugins for the agent based on LocalFlavor. -type withPluginsOpt struct { - callback func(local *FlavorLocal) []*core.NamedPlugin -} - -// OptionMarkerCore is just for marking implementation that it implements this interface -func (opt *withPluginsOpt) OptionMarkerCore() {} - -// Plugins methods is here to implement core.WithPluginsOpt go interface -// is a callback that uses flavor input for dependency injection -// for custom plugins (returned as NamedPlugin) -func (opt *withPluginsOpt) Plugins(flavors ...core.Flavor) []*core.NamedPlugin { - for _, flavor := range flavors { - if f, ok := flavor.(*FlavorLocal); ok { - return opt.callback(f) - } - } - - panic("wrong usage of local.WithPlugin() for other than FlavorLocal") -} diff --git a/vendor/github.com/ligato/cn-infra/flavors/local/plugin_deps.go b/vendor/github.com/ligato/cn-infra/flavors/local/plugin_deps.go deleted file mode 100644 index 6b4493c2cd..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/local/plugin_deps.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package local - -import ( - "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/health/statuscheck" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/servicelabel" -) - -// PluginLogDeps is minimal set of plugin dependencies that -// will probably use every plugin to: -// - log messages using plugin logger or child (prefixed) logger (in case plugin -// needs more than one) -// - to learn the plugin name -type PluginLogDeps struct { - Log logging.PluginLogger // inject - PluginName core.PluginName // inject -} - -// Close is called by Agent Core when the Agent is shutting down. -// It is supposed to clean up resources that were allocated by the plugin -// during its lifetime. This is a default empty implementation used to not bother -// plugins that do not need to implement this method. -func (plugin *PluginLogDeps) Close() error { - return nil -} - -// PluginInfraDeps is a standard set of plugin dependencies that -// will need probably every connector to DB/Messaging: -// - to report/write plugin status to StatusCheck -// - to know micro-service label prefix -type PluginInfraDeps struct { - PluginLogDeps // inject - config.PluginConfig // inject - StatusCheck statuscheck.PluginStatusWriter // inject - ServiceLabel servicelabel.ReaderAPI // inject -} diff --git a/vendor/github.com/ligato/cn-infra/flavors/rpc/doc.go b/vendor/github.com/ligato/cn-infra/flavors/rpc/doc.go deleted file mode 100644 index bd486f70de..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/rpc/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rpc defines "RPC" flavor, which combines all plugins that provide -// RPC-like access. -package rpc diff --git a/vendor/github.com/ligato/cn-infra/flavors/rpc/rpc_flavor.go b/vendor/github.com/ligato/cn-infra/flavors/rpc/rpc_flavor.go deleted file mode 100644 index 4c5cadf795..0000000000 --- a/vendor/github.com/ligato/cn-infra/flavors/rpc/rpc_flavor.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpc - -import ( - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/flavors/local" - "github.com/ligato/cn-infra/health/probe" - "github.com/ligato/cn-infra/rpc/grpc" - "github.com/ligato/cn-infra/rpc/prometheus" - "github.com/ligato/cn-infra/rpc/rest" -) - -// NewAgent returns a new instance of the Agent with plugins. -// It is an alias for core.NewAgent() to implicit use of the FlavorRPC -func NewAgent(opts ...core.Option) *core.Agent { - return core.NewAgent(&FlavorRPC{}, opts...) 
-} - -// WithPlugins for adding custom plugins to SFC Controller -// is a callback that uses flavor input to -// inject dependencies for custom plugins that are in output -// -// Example: -// -// NewAgent(rpc.WithPlugins(func(flavor) { -// return []*core.NamedPlugin{{"my-plugin", &MyPlugin{DependencyXY: &flavor.GRPC}}} -// })) -func WithPlugins(listPlugins func(local *FlavorRPC) []*core.NamedPlugin) core.WithPluginsOpt { - return &withPluginsOpt{listPlugins} -} - -// FlavorRPC glues together multiple plugins that provide RPC-like access. -// They are typically used to enable remote management for other plugins. -type FlavorRPC struct { - *local.FlavorLocal - - HTTP rest.Plugin - HTTPProbe rest.ForkPlugin - Prometheus prometheus.Plugin - - HealthRPC probe.Plugin - PrometheusRPC probe.PrometheusPlugin - - GRPC grpc.Plugin - - injected bool -} - -// Inject initializes flavor references/dependencies. -func (f *FlavorRPC) Inject() bool { - if f.injected { - return false - } - f.injected = true - - if f.FlavorLocal == nil { - f.FlavorLocal = &local.FlavorLocal{} - } - f.FlavorLocal.Inject() - - rest.DeclareHTTPPortFlag("http") - httpPlugDeps := *f.InfraDeps("http", local.WithConf()) - f.HTTP.Deps.Log = httpPlugDeps.Log - f.HTTP.Deps.PluginConfig = httpPlugDeps.PluginConfig - f.HTTP.Deps.PluginName = httpPlugDeps.PluginName - - f.Logs.HTTP = &f.HTTP - - f.Prometheus.Deps.PluginInfraDeps = *f.InfraDeps("prometheus") - f.Prometheus.HTTP = &f.HTTPProbe - - grpc.DeclareGRPCPortFlag("grpc") - grpcPlugDeps := *f.InfraDeps("grpc", local.WithConf()) - f.GRPC.Deps.Log = grpcPlugDeps.Log - f.GRPC.Deps.PluginConfig = grpcPlugDeps.PluginConfig - f.GRPC.Deps.PluginName = grpcPlugDeps.PluginName - f.GRPC.Deps.HTTP = &f.HTTP - - rest.DeclareHTTPPortFlag("http-probe") - httpProbeDeps := *f.InfraDeps("http-probe", local.WithConf()) - f.HTTPProbe.Deps.Log = httpProbeDeps.Log - f.HTTPProbe.Deps.PluginConfig = httpProbeDeps.PluginConfig - f.HTTPProbe.Deps.PluginName = httpProbeDeps.PluginName - f.HTTPProbe.Deps.DefaultHTTP = &f.HTTP - - f.HealthRPC.Deps.PluginInfraDeps = *f.InfraDeps("health-rpc") - f.HealthRPC.Deps.HTTP = &f.HTTPProbe - f.HealthRPC.Deps.StatusCheck = &f.StatusCheck - //TODO f.HealthRPC.Transport inject restsync - - f.PrometheusRPC.PluginInfraDeps = *f.InfraDeps("health-prometheus-rpc") - f.PrometheusRPC.Prometheus = &f.Prometheus - f.PrometheusRPC.StatusCheck = &f.StatusCheck - - return true -} - -// Plugins combines all Plugins in flavor to the list. -func (f *FlavorRPC) Plugins() []*core.NamedPlugin { - f.Inject() - return core.ListPluginsInFlavor(f) -} - -// withPluginsOpt is return value of rpc.WithPlugins() utility -// to easily define new plugins for the agent based on LocalFlavor. 
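And the matching application-side sketch for this flavor: a hypothetical plugin serving REST through the shared HTTP server, registered via rpc.WithPlugins:

    import (
        "net/http"

        "github.com/ligato/cn-infra/core"
        "github.com/ligato/cn-infra/flavors/rpc"
        "github.com/ligato/cn-infra/rpc/rest"
        "github.com/unrolled/render"
    )

    // MyRESTPlugin is illustrative; rest.HTTPHandlers is the interface the
    // HTTP plugin exposes for handler registration.
    type MyRESTPlugin struct {
        HTTP rest.HTTPHandlers
    }

    func (p *MyRESTPlugin) Init() error { return nil }

    // AfterInit registers a handler once the HTTP plugin is up.
    func (p *MyRESTPlugin) AfterInit() error {
        p.HTTP.RegisterHTTPHandler("/my-plugin/status",
            func(formatter *render.Render) http.HandlerFunc {
                return func(w http.ResponseWriter, req *http.Request) {
                    formatter.JSON(w, http.StatusOK, map[string]string{"status": "ok"})
                }
            }, "GET")
        return nil
    }

    func (p *MyRESTPlugin) Close() error { return nil }

    func main() {
        agent := rpc.NewAgent(rpc.WithPlugins(func(flavor *rpc.FlavorRPC) []*core.NamedPlugin {
            return []*core.NamedPlugin{{"my-rest-plugin", &MyRESTPlugin{HTTP: &flavor.HTTP}}}
        }))
        core.EventLoopWithInterrupt(agent, nil)
    }
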
-type withPluginsOpt struct { - callback func(local *FlavorRPC) []*core.NamedPlugin -} - -// OptionMarkerCore is just for marking implementation that it implements this interface -func (opt *withPluginsOpt) OptionMarkerCore() {} - -// Plugins methods is here to implement core.WithPluginsOpt go interface -// is a callback that uses flavor input for dependency injection -// for custom plugins (returned as NamedPlugin) -func (opt *withPluginsOpt) Plugins(flavors ...core.Flavor) []*core.NamedPlugin { - for _, flavor := range flavors { - if f, ok := flavor.(*FlavorRPC); ok { - return opt.callback(f) - } - } - - panic("wrong usage of rpc.WithPlugin() for other than FlavorRPC") -} diff --git a/vendor/github.com/ligato/cn-infra/health/probe/deps_probe.go b/vendor/github.com/ligato/cn-infra/health/probe/deps_probe.go deleted file mode 100644 index 41119409f3..0000000000 --- a/vendor/github.com/ligato/cn-infra/health/probe/deps_probe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package probe implements Liveness/Readiness/Prometheus health/metrics HTTP handlers. -package probe - -import ( - "github.com/ligato/cn-infra/flavors/local" - "github.com/ligato/cn-infra/health/statuscheck" - "github.com/ligato/cn-infra/rpc/prometheus" - "github.com/ligato/cn-infra/rpc/rest" -) - -// Deps lists dependencies of REST plugin. -type Deps struct { - local.PluginInfraDeps // inject - HTTP rest.HTTPHandlers // inject - StatusCheck statuscheck.StatusReader // inject -} - -// PrometheusDeps lists dependencies of Prometheus plugin. -type PrometheusDeps struct { - local.PluginInfraDeps // inject - HTTP rest.HTTPHandlers // inject - StatusCheck statuscheck.StatusReader // inject - Prometheus prometheus.API // inject -} diff --git a/vendor/github.com/ligato/cn-infra/health/probe/doc.go b/vendor/github.com/ligato/cn-infra/health/probe/doc.go deleted file mode 100644 index 5c7f874049..0000000000 --- a/vendor/github.com/ligato/cn-infra/health/probe/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package probe implements HTTP probes: the K8s readiness and liveliness probe handlers + Prometheus format. 
-package probe diff --git a/vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_probes.go b/vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_probes.go deleted file mode 100644 index e55d32f881..0000000000 --- a/vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_probes.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package probe - -import ( - "encoding/json" - "net/http" - - "github.com/ligato/cn-infra/health/statuscheck/model/status" - "github.com/unrolled/render" -) - -const ( - livenessProbePath string = "/liveness" // liveness probe URL - readinessProbePath string = "/readiness" // readiness probe URL -) - -// Plugin struct holds all plugin-related data. -type Plugin struct { - Deps -} - -// Init does nothing -func (p *Plugin) Init() (err error) { - return nil -} - -// AfterInit registers HTTP handlers for liveness and readiness probes. -func (p *Plugin) AfterInit() error { - if p.HTTP != nil { - if p.StatusCheck != nil { - p.Log.Infof("Starting health http-probe on port %v", p.HTTP.GetPort()) - p.HTTP.RegisterHTTPHandler(livenessProbePath, p.livenessProbeHandler, "GET") - p.HTTP.RegisterHTTPHandler(readinessProbePath, p.readinessProbeHandler, "GET") - - } else { - p.Log.Info("Unable to register http-probe handler, StatusCheck is nil") - } - } else { - p.Log.Info("Unable to register http-probe handler, HTTP is nil") - } - - return nil -} - -// readinessProbeHandler handles k8s readiness probe. -func (p *Plugin) readinessProbeHandler(formatter *render.Render) http.HandlerFunc { - - return func(w http.ResponseWriter, req *http.Request) { - ifStat := p.StatusCheck.GetInterfaceStats() - agentStat := p.StatusCheck.GetAgentStatus() - agentStat.InterfaceStats = &ifStat - agentStatJSON, _ := json.Marshal(agentStat) - if agentStat.State == status.OperationalState_OK { - w.WriteHeader(http.StatusOK) - } else { - w.WriteHeader(http.StatusInternalServerError) - } - w.Write(agentStatJSON) - } -} - -// livenessProbeHandler handles k8s liveness probe. -func (p *Plugin) livenessProbeHandler(formatter *render.Render) http.HandlerFunc { - - return func(w http.ResponseWriter, req *http.Request) { - stat := p.StatusCheck.GetAgentStatus() - statJSON, _ := json.Marshal(p.StatusCheck.GetAgentStatus()) - - if stat.State == status.OperationalState_INIT || stat.State == status.OperationalState_OK { - w.WriteHeader(http.StatusOK) - w.Write(statJSON) - } else { - w.WriteHeader(http.StatusInternalServerError) - w.Write(statJSON) - } - } -} - -// String returns plugin name if it was injected, "HEALTH_RPC_PROBES" otherwise. 
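Since both probes are plain HTTP GETs that return 200 when healthy, an external gate only needs the status code; a minimal client-side check, with the base URL assumed to point at the http-probe port:

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    // waitReady returns nil once the agent reports readiness.
    func waitReady(baseURL string) error {
        resp, err := http.Get(baseURL + "/readiness")
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return fmt.Errorf("agent not ready: %s", resp.Status)
        }
        // The body carries the JSON-marshalled status.AgentStatus,
        // useful for diagnostics.
        body, _ := ioutil.ReadAll(resp.Body)
        _ = body
        return nil
    }
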
-func (p *Plugin) String() string { - if len(string(p.PluginName)) > 0 { - return string(p.PluginName) - } - return "HEALTH_RPC_PROBES" -} diff --git a/vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_prometheus.go b/vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_prometheus.go deleted file mode 100644 index 0c4873bddb..0000000000 --- a/vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_prometheus.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package probe - -import ( - "github.com/ligato/cn-infra/health/statuscheck/model/status" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -const ( - defaultPluginName string = "PROMETHEUS" - - // DefaultHealthPath default Prometheus health metrics URL - DefaultHealthPath string = "/health" - - // Namespace namespace to use for Prometheus health metrics - Namespace string = "" - // Subsystem subsystem to use for Prometheus health metrics - Subsystem string = "" - // ServiceLabel label for service field - ServiceLabel string = "service" - // DependencyLabel label for dependency field - DependencyLabel string = "dependency" - // BuildVersionLabel label for build version field - BuildVersionLabel string = "build_version" - // BuildDateLabel label for build date field - BuildDateLabel string = "build_date" - - // ServiceHealthName name of service health metric - ServiceHealthName string = "service_health" - - // ServiceHealthHelp help text for service health metric - // Adapt Ligato status code for now. - // TODO: Consolidate with that from the "Common Container Telemetry" proposal. - // ServiceHealthHelp string = "The health of the ServiceLabel 0 = INIT, 1 = UP, 2 = DOWN, 3 = OUTAGE" - ServiceHealthHelp string = "The health of the ServiceLabel 0 = INIT, 1 = OK, 2 = ERROR" - - // DependencyHealthName name of dependency health metric - DependencyHealthName string = "service_dependency_health" - - // DependencyHealthHelp help text for dependency health metric - // Adapt Ligato status code for now. - // TODO: Consolidate with that from the "Common Container Telemetry" proposal. - // DependencyHealthHelp string = "The health of the DependencyLabel 0 = INIT, 1 = UP, 2 = DOWN, 3 = OUTAGE" - DependencyHealthHelp string = "The health of the DependencyLabel 0 = INIT, 1 = OK, 2 = ERROR" - - // ServiceInfoName name of service info metric - ServiceInfoName string = "service_info" - // ServiceInfoHelp help text for service info metric - ServiceInfoHelp string = "Build info for the service. Value is always 1, build info is in the tags." -) - -// PrometheusPlugin struct holds all plugin-related data. -type PrometheusPlugin struct { - PrometheusDeps -} - -// Init may create a new (custom) instance of HTTP if the injected instance uses -// different HTTP port than requested. 
-func (p *PrometheusPlugin) Init() (err error) { - - if p.Prometheus != nil && p.StatusCheck != nil { - err := p.Prometheus.NewRegistry(DefaultHealthPath, promhttp.HandlerOpts{}) - if err != nil { - return err - } - p.Prometheus.RegisterGaugeFunc( - DefaultHealthPath, - Namespace, - Subsystem, - ServiceHealthName, - ServiceHealthHelp, - prometheus.Labels{ServiceLabel: p.getServiceLabel()}, - p.getServiceHealth, - ) - agentStatus := p.StatusCheck.GetAgentStatus() - p.Prometheus.RegisterGaugeFunc( - DefaultHealthPath, - Namespace, - Subsystem, - ServiceInfoName, - ServiceInfoHelp, - prometheus.Labels{ - ServiceLabel: p.getServiceLabel(), - BuildVersionLabel: agentStatus.BuildVersion, - BuildDateLabel: agentStatus.BuildDate}, - func() float64 { return 1 }, - ) - } - - return nil -} - -// AfterInit registers HTTP handlers. -func (p *PrometheusPlugin) AfterInit() error { - - //TODO: Need improvement - instead of the exposing the map directly need to use in-memory mapping - if p.StatusCheck != nil { - allPluginStatusMap := p.StatusCheck.GetAllPluginStatus() - for k, v := range allPluginStatusMap { - p.Log.Infof("k=%v, v=%v, state=%v", k, v, v.State) - p.Prometheus.RegisterGaugeFunc( - DefaultHealthPath, - Namespace, - Subsystem, - DependencyHealthName, - DependencyHealthHelp, - prometheus.Labels{ - ServiceLabel: p.getServiceLabel(), - DependencyLabel: k, - }, - p.getDependencyHealth(k, v), - ) - } - } else { - p.Log.Error("PluginStatusCheck is nil") - } - - return nil -} - -// Close shutdowns HTTP if a custom instance was created in Init(). -func (p *PrometheusPlugin) Close() error { - return nil -} - -// getServiceHealth returns agent health status -func (p *PrometheusPlugin) getServiceHealth() float64 { - agentStatus := p.StatusCheck.GetAgentStatus() - // Adapt Ligato status code for now. - // TODO: Consolidate with that from the "Common Container Telemetry" proposal. - health := float64(agentStatus.State) - p.Log.Infof("ServiceHealth: %v", health) - return health -} - -// getDependencyHealth returns plugin health status -func (p *PrometheusPlugin) getDependencyHealth(pluginName string, pluginStatus *status.PluginStatus) func() float64 { - p.Log.Infof("DependencyHealth for plugin %v: %v", pluginName, float64(pluginStatus.State)) - - return func() float64 { - health := float64(pluginStatus.State) - depName := pluginName - p.Log.Infof("Dependency Health %v: %v", depName, health) - return health - } -} - -// String returns plugin name if it was injected, defaultPluginName otherwise. -func (p *PrometheusPlugin) String() string { - if len(string(p.PluginName)) > 0 { - return string(p.PluginName) - } - return defaultPluginName -} - -func (p *PrometheusPlugin) getServiceLabel() string { - serviceLabel := p.String() - if p.ServiceLabel != nil { - serviceLabel = p.ServiceLabel.GetAgentLabel() - } - return serviceLabel -} diff --git a/vendor/github.com/ligato/cn-infra/health/statuscheck/options.go b/vendor/github.com/ligato/cn-infra/health/statuscheck/options.go new file mode 100644 index 0000000000..69f7b07c83 --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/health/statuscheck/options.go @@ -0,0 +1,49 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package statuscheck + +import ( + "github.com/ligato/cn-infra/logging" +) + +// DefaultPlugin is a default instance of Plugin. +var DefaultPlugin = *NewPlugin() + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "status-check" + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Plugin) + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} diff --git a/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_api_statuscheck.go b/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_api_statuscheck.go index 60928fc766..8b8af77172 100644 --- a/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_api_statuscheck.go +++ b/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_api_statuscheck.go @@ -16,8 +16,8 @@ package statuscheck import ( "github.com/gogo/protobuf/proto" - "github.com/ligato/cn-infra/core" "github.com/ligato/cn-infra/health/statuscheck/model/status" + "github.com/ligato/cn-infra/infra" ) //go:generate protoc --proto_path=model/status --gogo_out=model/status model/status/status.proto @@ -26,6 +26,15 @@ import ( // of a plugin. type PluginState string +const ( + // Init state means that the initialization of the plugin is in progress. + Init PluginState = "init" + // OK state means that the plugin is healthy. + OK PluginState = "ok" + // Error state means that some error has occurred in the plugin. + Error PluginState = "error" +) + // PluginStateProbe defines parameters of a function used for plugin state // probing, referred to as "probe". type PluginStateProbe func() (PluginState, error) @@ -36,19 +45,19 @@ type PluginStatusWriter interface { // If is not nil, Statuscheck will periodically probe the plugin // state through the provided function. Otherwise, it is expected that the // plugin itself will report state updates through ReportStateChange(). - Register(pluginName core.PluginName, probe PluginStateProbe) + Register(pluginName infra.PluginName, probe PluginStateProbe) // ReportStateChange can be used to report a change in the status // of a previously registered plugin. It is not a bug, however, to report // the same status in consecutive calls. Statuscheck is smart enough // to detect an actual status change and propagate only updates to remote // clients. - ReportStateChange(pluginName core.PluginName, state PluginState, lastError error) + ReportStateChange(pluginName infra.PluginName, state PluginState, lastError error) // ReportStateChangeWithMeta can be used to report a change in the status // of a previously registered plugin with added metadata value stored in // global agent status. Metadata type is specified in statuscheck model. 
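The options pattern introduced in options.go above replaces flavor-side injection; constructing an instance looks like this sketch (the logger name is illustrative):

    // Default instance, ready to use:
    sc := &statuscheck.DefaultPlugin

    // Or a custom instance with injected dependencies:
    custom := statuscheck.NewPlugin(statuscheck.UseDeps(func(deps *statuscheck.Deps) {
        deps.Log = logging.ForPlugin("my-status-check")
        // deps.Transport left nil: state is then kept in memory only.
    }))
    _, _ = sc, custom
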
- ReportStateChangeWithMeta(pluginName core.PluginName, state PluginState, lastError error, meta proto.Message) + ReportStateChangeWithMeta(pluginName infra.PluginName, state PluginState, lastError error, meta proto.Message) } // AgentStatusReader allows to lookup agent status by other plugins. diff --git a/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_impl_statuscheck.go b/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_impl_statuscheck.go index d106ab6cc7..3ffa355ec0 100644 --- a/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_impl_statuscheck.go +++ b/vendor/github.com/ligato/cn-infra/health/statuscheck/plugin_impl_statuscheck.go @@ -20,25 +20,18 @@ import ( "time" "github.com/gogo/protobuf/proto" - "github.com/ligato/cn-infra/core" + "github.com/ligato/cn-infra/agent" "github.com/ligato/cn-infra/datasync" "github.com/ligato/cn-infra/health/statuscheck/model/status" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging" ) -const ( - // Init state means that the initialization of the plugin is in progress. - Init PluginState = "init" - // OK state means that the plugin is healthy. - OK PluginState = "ok" - // Error state means that some error has occurred in the plugin. - Error PluginState = "error" - - // frequency of periodic writes of state data into ETCD - periodicWriteTimeout time.Duration = time.Second * 10 - - // frequency of periodic plugin state probing - periodicProbingTimeout time.Duration = time.Second * 5 +var ( + // PeriodicWriteTimeout is frequency of periodic writes of state data into ETCD. + PeriodicWriteTimeout = time.Second * 10 + // PeriodicProbingTimeout is frequency of periodic plugin state probing. + PeriodicProbingTimeout = time.Second * 5 ) // Plugin struct holds all plugin-related data. @@ -59,21 +52,21 @@ type Plugin struct { // Deps lists the dependencies of statuscheck plugin. type Deps struct { - Log logging.PluginLogger // inject - PluginName core.PluginName // inject - Transport datasync.KeyProtoValWriter // inject (optional) + infra.PluginName // inject + Log logging.PluginLogger // inject + Transport datasync.KeyProtoValWriter // inject (optional) } // Init prepares the initial status data. func (p *Plugin) Init() error { // write initial status data into ETCD p.agentStat = &status.AgentStatus{ - BuildVersion: core.BuildVersion, - BuildDate: core.BuildDate, State: status.OperationalState_INIT, + BuildVersion: agent.BuildVersion, + BuildDate: agent.BuildDate, + CommitHash: agent.CommitHash, StartTime: time.Now().Unix(), LastChange: time.Now().Unix(), - CommitHash: core.CommitHash, } // initial empty interface status @@ -124,7 +117,7 @@ func (p *Plugin) Close() error { } // Register a plugin for status change reporting. -func (p *Plugin) Register(pluginName core.PluginName, probe PluginStateProbe) { +func (p *Plugin) Register(pluginName infra.PluginName, probe PluginStateProbe) { p.access.Lock() defer p.access.Unlock() @@ -145,13 +138,13 @@ func (p *Plugin) Register(pluginName core.PluginName, probe PluginStateProbe) { } // ReportStateChange can be used to report a change in the status of a previously registered plugin. 
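Typical use of this writer API after the core-to-infra rename, in both the polled and the push style; plugin names and the error are illustrative:

    func register(sc statuscheck.PluginStatusWriter) {
        // Polled: statuscheck calls the probe periodically.
        sc.Register(infra.PluginName("my-plugin"), func() (statuscheck.PluginState, error) {
            return statuscheck.OK, nil
        })

        // Pushed: register without a probe and report transitions explicitly.
        sc.Register(infra.PluginName("my-other-plugin"), nil)
        sc.ReportStateChange(infra.PluginName("my-other-plugin"), statuscheck.Error,
            errors.New("backend unreachable"))
    }
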
-func (p *Plugin) ReportStateChange(pluginName core.PluginName, state PluginState, lastError error) { +func (p *Plugin) ReportStateChange(pluginName infra.PluginName, state PluginState, lastError error) { p.reportStateChange(pluginName, state, lastError) } // ReportStateChangeWithMeta can be used to report a change in the status of a previously registered plugin and report // the specific metadata state -func (p *Plugin) ReportStateChangeWithMeta(pluginName core.PluginName, state PluginState, lastError error, meta proto.Message) { +func (p *Plugin) ReportStateChangeWithMeta(pluginName infra.PluginName, state PluginState, lastError error, meta proto.Message) { p.reportStateChange(pluginName, state, lastError) switch data := meta.(type) { @@ -162,7 +155,7 @@ func (p *Plugin) ReportStateChangeWithMeta(pluginName core.PluginName, state Plu } } -func (p *Plugin) reportStateChange(pluginName core.PluginName, state PluginState, lastError error) { +func (p *Plugin) reportStateChange(pluginName infra.PluginName, state PluginState, lastError error) { p.access.Lock() defer p.access.Unlock() @@ -260,7 +253,7 @@ func (p *Plugin) publishAgentData() error { } // publishPluginData writes the current plugin state into ETCD. -func (p *Plugin) publishPluginData(pluginName core.PluginName, pluginStat *status.PluginStatus) error { +func (p *Plugin) publishPluginData(pluginName infra.PluginName, pluginStat *status.PluginStatus) error { pluginStat.LastUpdate = time.Now().Unix() if p.Transport != nil { return p.Transport.Put(status.PluginStatusKey(string(pluginName)), pluginStat) @@ -275,7 +268,7 @@ func (p *Plugin) publishAllData() { p.publishAgentData() for name, s := range p.pluginStat { - p.publishPluginData(core.PluginName(name), s) + p.publishPluginData(infra.PluginName(name), s) } } @@ -287,10 +280,10 @@ func (p *Plugin) periodicProbing(ctx context.Context) { for { select { - case <-time.After(periodicProbingTimeout): + case <-time.After(PeriodicProbingTimeout): for pluginName, probe := range p.pluginProbe { state, lastErr := probe() - p.ReportStateChange(core.PluginName(pluginName), state, lastErr) + p.ReportStateChange(infra.PluginName(pluginName), state, lastErr) // just check in-between probes if the plugin is closing select { case <-ctx.Done(): @@ -313,7 +306,7 @@ func (p *Plugin) periodicUpdates(ctx context.Context) { for { select { - case <-time.After(periodicWriteTimeout): + case <-time.After(PeriodicWriteTimeout): p.publishAllData() case <-ctx.Done(): @@ -329,6 +322,23 @@ func (p *Plugin) getAgentState() status.OperationalState { return p.agentStat.State } +// GetAllPluginStatus returns a map containing pluginname and its status, for all plugins +func (p *Plugin) GetAllPluginStatus() map[string]*status.PluginStatus { + //TODO - used currently, will be removed after incoporating improvements for exposing copy of map + p.access.Lock() + defer p.access.Unlock() + + return p.pluginStat +} + +// GetInterfaceStats returns current global operational status of interfaces +func (p *Plugin) GetInterfaceStats() status.InterfaceStats { + p.access.Lock() + defer p.access.Unlock() + + return *p.interfaceStat +} + // GetAgentStatus return current global operational state of the agent. 
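Because the two intervals are now exported variables rather than constants, the embedding application can tune them, as in this sketch:

    // Set before the plugin starts its periodic goroutines.
    statuscheck.PeriodicProbingTimeout = time.Second    // default 5s
    statuscheck.PeriodicWriteTimeout = 30 * time.Second // default 10s
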
func (p *Plugin) GetAgentStatus() status.AgentStatus { p.access.Lock() @@ -347,20 +357,3 @@ func stateToProto(state PluginState) status.OperationalState { return status.OperationalState_ERROR } } - -// GetAllPluginStatus returns a map containing pluginname and its status, for all plugins -func (p *Plugin) GetAllPluginStatus() map[string]*status.PluginStatus { - //TODO - used currently, will be removed after incoporating improvements for exposing copy of map - p.access.Lock() - defer p.access.Unlock() - - return p.pluginStat -} - -// GetInterfaceStats returns current global operational status of interfaces -func (p *Plugin) GetInterfaceStats() status.InterfaceStats { - p.access.Lock() - defer p.access.Unlock() - - return *p.interfaceStat -} diff --git a/vendor/github.com/ligato/cn-infra/idxmap/api.go b/vendor/github.com/ligato/cn-infra/idxmap/api.go index aa47c77494..30b70c879d 100644 --- a/vendor/github.com/ligato/cn-infra/idxmap/api.go +++ b/vendor/github.com/ligato/cn-infra/idxmap/api.go @@ -14,9 +14,7 @@ package idxmap -import ( - "github.com/ligato/cn-infra/core" -) +import "github.com/ligato/cn-infra/infra" // NamedMapping is the "user API" to the mapping. It provides read-only access. type NamedMapping interface { @@ -45,7 +43,7 @@ type NamedMapping interface { // // map.Watch(plugin.PluginName, func(msgNamedMappingGenericEvent) {/*handle callback*/ return nil}) // - Watch(subscriber core.PluginName, callback func(NamedMappingGenericEvent)) error + Watch(subscriber infra.PluginName, callback func(NamedMappingGenericEvent)) error } // NamedMappingRW is the "owner API" to the mapping. Using this API the owner diff --git a/vendor/github.com/ligato/cn-infra/idxmap/chan.go b/vendor/github.com/ligato/cn-infra/idxmap/chan.go index f5fcabb09e..03c9f453ba 100644 --- a/vendor/github.com/ligato/cn-infra/idxmap/chan.go +++ b/vendor/github.com/ligato/cn-infra/idxmap/chan.go @@ -17,7 +17,6 @@ package idxmap import ( "time" - "github.com/ligato/cn-infra/core" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" ) @@ -33,14 +32,14 @@ func ToChan(ch chan NamedMappingGenericEvent, opts ...interface{}) func(dto Name timeout := DefaultNotifTimeout var logger logging.Logger = logrus.DefaultLogger() - for _, opt := range opts { + /*for _, opt := range opts { switch opt.(type) { case *core.WithLoggerOpt: logger = opt.(*core.WithLoggerOpt).Logger case *core.WithTimeoutOpt: timeout = opt.(*core.WithTimeoutOpt).Timeout } - } + }*/ return func(dto NamedMappingGenericEvent) { select { diff --git a/vendor/github.com/ligato/cn-infra/idxmap/mem/inmemory_name_mapping.go b/vendor/github.com/ligato/cn-infra/idxmap/mem/inmemory_name_mapping.go index 3a55e7473a..84605995a8 100644 --- a/vendor/github.com/ligato/cn-infra/idxmap/mem/inmemory_name_mapping.go +++ b/vendor/github.com/ligato/cn-infra/idxmap/mem/inmemory_name_mapping.go @@ -18,8 +18,8 @@ import ( "fmt" "sync" - "github.com/ligato/cn-infra/core" "github.com/ligato/cn-infra/idxmap" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging" ) @@ -152,7 +152,7 @@ func (mem *memNamedMapping) ListNames(field string, value string) []string { // Watch allows to subscribe for tracking changes in the mapping. // When an item is added or removed, the given is triggered. 
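A subscription sketch against the updated idxmap API, fanning events into a channel with the ToChan helper shown above; `mapping` is any idxmap.NamedMapping, and the Name/Del event fields are assumed from the usual NamedMappingEvent shape:

    func watchMapping(mapping idxmap.NamedMapping) error {
        ch := make(chan idxmap.NamedMappingGenericEvent, 100)
        if err := mapping.Watch(infra.PluginName("my-plugin"), idxmap.ToChan(ch)); err != nil {
            return err
        }
        go func() {
            for ev := range ch {
                // ev.Name identifies the item; ev.Del distinguishes removals.
                log.Printf("mapping event: name=%s del=%v", ev.Name, ev.Del)
            }
        }()
        return nil
    }
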
-func (mem *memNamedMapping) Watch(subscriber core.PluginName, callback func(idxmap.NamedMappingGenericEvent)) error { +func (mem *memNamedMapping) Watch(subscriber infra.PluginName, callback func(idxmap.NamedMappingGenericEvent)) error { mem.Debug("Watch ", subscriber) _, found := mem.subscribers.LoadOrStore(subscriber, callback) @@ -238,7 +238,7 @@ func (mem *memNamedMapping) putNameToIdxSync(name string, metadata interface{}) func (mem *memNamedMapping) publishAddToChannel(name string, value interface{}) { mem.subscribers.Range(func(key, val interface{}) bool { - subscriber := key.(core.PluginName) + subscriber := key.(infra.PluginName) clb := val.(func(idxmap.NamedMappingGenericEvent)) if clb != nil { @@ -261,7 +261,7 @@ func (mem *memNamedMapping) publishAddToChannel(name string, value interface{}) func (mem *memNamedMapping) publishUpdateToChannel(name string, value interface{}) { mem.subscribers.Range(func(key, val interface{}) bool { - subscriber := key.(core.PluginName) + subscriber := key.(infra.PluginName) clb := val.(func(idxmap.NamedMappingGenericEvent)) if clb != nil { @@ -284,7 +284,7 @@ func (mem *memNamedMapping) publishDelToChannel(name string, value interface{}) { mem.subscribers.Range(func(key, val interface{}) bool { - subscriber := key.(core.PluginName) + subscriber := key.(infra.PluginName) clb := val.(func(idxmap.NamedMappingGenericEvent)) if clb != nil { diff --git a/vendor/github.com/ligato/cn-infra/infra/infra.go b/vendor/github.com/ligato/cn-infra/infra/infra.go new file mode 100644 index 0000000000..c1de4ccd0f --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/infra/infra.go @@ -0,0 +1,49 @@ +package infra + +import ( + "github.com/ligato/cn-infra/config" + "github.com/ligato/cn-infra/logging" +) + +// Plugin interface defines plugin's basic life-cycle methods. +type Plugin interface { + // Init is called in the agent's startup phase. + Init() error + // Close is called in the agent's cleanup phase. + Close() error + // String returns the unique name of the plugin. + String() string +} + +// PostInit interface defines an optional method for plugins with additional initialization. +type PostInit interface { + // AfterInit is called once Init() has returned without error for all plugins. + AfterInit() error +} + +// PluginName is a part of the plugin's API. +// It's used by embedding it into Plugin to +// provide the unique name of the plugin. +type PluginName string + +// String returns the PluginName. +func (name PluginName) String() string { + return string(name) } + +// SetName sets the plugin name. +func (name *PluginName) SetName(n string) { + *name = PluginName(n) +} + +// Deps defines common dependencies for use in Plugins. +// It can easily be embedded in Deps for Plugin: +// type Deps struct { +//     infra.Deps +//     // other dependencies +// } +type Deps struct { + PluginName + Log logging.PluginLogger + config.PluginConfig +}
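A minimal sketch of a plugin built on the new infra package just added; the plugin itself is hypothetical, but the embedded dependencies come straight from the file above:

package example

import "github.com/ligato/cn-infra/infra"

// ExamplePlugin satisfies infra.Plugin: Init and Close are defined below,
// and String() comes from the PluginName embedded via infra.Deps.
type ExamplePlugin struct {
	infra.Deps
}

// Init is called in the agent's startup phase.
func (p *ExamplePlugin) Init() error {
	p.Log.Info("example plugin initializing")
	return nil
}

// Close is called in the agent's cleanup phase.
func (p *ExamplePlugin) Close() error {
	return nil
}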
diff --git a/vendor/github.com/ligato/cn-infra/logging/log_api.go b/vendor/github.com/ligato/cn-infra/logging/log_api.go index e54a344673..d7dc25f400 100644 --- a/vendor/github.com/ligato/cn-infra/logging/log_api.go +++ b/vendor/github.com/ligato/cn-infra/logging/log_api.go @@ -16,8 +16,13 @@ package logging import "fmt" -// Fields is a type accepted by WithFields method. It can be used to instantiate map using shorter notation. -type Fields map[string]interface{} +var ( + // DefaultLogger is the default logger + DefaultLogger Logger + + // DefaultRegistry is the default logging registry + DefaultRegistry Registry +) // LogLevel represents severity of log record type LogLevel uint32 @@ -57,21 +62,8 @@ func (level LogLevel) String() string { return fmt.Sprintf("unknown(%d)", level) } -// Logger provides logging capabilities -type Logger interface { - // GetName return the logger name - GetName() string - // SetLevel modifies the LogLevel - SetLevel(level LogLevel) - // GetLevel returns currently set logLevel - GetLevel() LogLevel - // WithField creates one structured field - WithField(key string, value interface{}) LogWithLevel - // WithFields creates multiple structured fields - WithFields(fields map[string]interface{}) LogWithLevel - - LogWithLevel -} +// Fields is a type accepted by WithFields method. It can be used to instantiate map using shorter notation. +type Fields map[string]interface{} // LogWithLevel allows to log with different log levels type LogWithLevel interface { @@ -94,28 +86,31 @@ type LogWithLevel interface { Println(v ...interface{}) } -// LogFactory is API for the plugins that want to create their own loggers. -type LogFactory interface { - NewLogger(name string) Logger -} +// Logger provides logging capabilities +type Logger interface { + // GetName returns the logger name + GetName() string + // SetLevel modifies the LogLevel + SetLevel(level LogLevel) + // GetLevel returns currently set logLevel + GetLevel() LogLevel + // WithField creates one structured field + WithField(key string, value interface{}) LogWithLevel + // WithFields creates multiple structured fields + WithFields(fields Fields) LogWithLevel -// PluginLogger is intended for: -// 1. small plugins (that just need one logger; name corresponds to plugin name) -// 2. large plugins that need multiple loggers (all loggers share same name prefix) -type PluginLogger interface { - // Plugin has by default possibility to log - // Logger name is initialized with plugin name - Logger + LogWithLevel +} - // LogFactory can be optionally used by large plugins - // to create child loggers (their names are prefixed by plugin logger name) - LogFactory +// LoggerFactory is the API for plugins that want to create their own loggers. +type LoggerFactory interface { + NewLogger(name string) Logger } // Registry groups multiple Logger instances and allows to manage their log levels. type Registry interface { - // LogFactory allow to create new loggers - LogFactory + // LoggerFactory allows creating new loggers + LoggerFactory // ListLoggers returns a map (loggerName => log level) ListLoggers() map[string]string // SetLevel modifies log level of selected logger in the registry @@ -128,31 +123,49 @@ type Registry interface { ClearRegistry() } +// PluginLogger is intended for: +// 1. small plugins (that just need one logger; name corresponds to plugin name) +// 2.
large plugins that need multiple loggers (all loggers share same name prefix) +type PluginLogger interface { + // Plugin has by default the possibility to log; + // the logger name is initialized with the plugin name + Logger + // LoggerFactory can be optionally used by large plugins + // to create child loggers (their names are prefixed by plugin logger name) + LoggerFactory +} + // ForPlugin is used to initialize plugin logger by name // and optionally create children (their names prefixed by plugin logger name) -// -// Example usage: -// -// flavor.ETCD.Logger = -// ForPlugin(PluginNameOfFlavor(&flavor.ETCD, flavor), flavor.Logrus) -// -func ForPlugin(name string, factory LogFactory) PluginLogger { +func ForPlugin(name string) PluginLogger { + if logger, found := DefaultRegistry.Lookup(name); found { + DefaultLogger.Debugf("using plugin logger for %q that was already initialized", name) + return &pluginLogger{ + Logger: logger, + LoggerFactory: &prefixedLoggerFactory{name, DefaultRegistry}, + } + } + return NewPluginLogger(name, DefaultRegistry) +} + +// NewPluginLogger creates a new logger with the given LoggerFactory. +func NewPluginLogger(name string, factory LoggerFactory) PluginLogger { return &pluginLogger{ - Logger: factory.NewLogger(name), - LogFactory: &prefixedLogFactory{name, factory}, + Logger: factory.NewLogger(name), + LoggerFactory: &prefixedLoggerFactory{name, factory}, } } -func (factory *prefixedLogFactory) NewLogger(name string) Logger { - return factory.delegate.NewLogger(factory.prefix + name) +type pluginLogger struct { + Logger + LoggerFactory } -type prefixedLogFactory struct { - prefix string - delegate LogFactory +type prefixedLoggerFactory struct { + prefix string + factory LoggerFactory } -type pluginLogger struct { - Logger - LogFactory +func (p *prefixedLoggerFactory) NewLogger(name string) Logger { + return p.factory.NewLogger(p.prefix + name) } diff --git a/vendor/github.com/ligato/cn-infra/logging/logging.conf b/vendor/github.com/ligato/cn-infra/logging/logging.conf index 084bcbf696..8466c22a97 100644 --- a/vendor/github.com/ligato/cn-infra/logging/logging.conf +++ b/vendor/github.com/ligato/cn-infra/logging/logging.conf @@ -1,11 +1,11 @@ -# Set default config level for every plugin -default-level: debug +# Set the default log level for every plugin. Overridden by the environment variable 'INITIAL_LOGLVL' +default-level: info # Specifies a list of named loggers with respective log level loggers: - name: "agentcore", - level: info - - name: "status-check", level: debug + - name: "status-check", + level: info - name: "linux-plugin", - level: info \ No newline at end of file + level: warn \ No newline at end of file diff --git a/vendor/github.com/ligato/cn-infra/logging/logmanager/README.md b/vendor/github.com/ligato/cn-infra/logging/logmanager/README.md deleted file mode 100644 index 8382aeb7aa..0000000000 --- a/vendor/github.com/ligato/cn-infra/logging/logmanager/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Log Manager - -Log manager plugin allows to view and modify log levels of loggers using REST API. - -**API** -- List all registered loggers: - - ```curl -X GET http://<host>:<port>/log/list``` -- Set log level for a registered logger: - ```curl -X PUT http://<host>:<port>/log/<logger-name>/<log-level>``` - - `<log-level>` is one of `debug`,`info`,`warning`,`error`,`fatal`,`panic` - -`<host>` and `<port>` are determined by configuration of rest.Plugin. - -**Config file** - -- Logger config file is composed of two parts: the default level applied for all plugins, and a map where every logger can have its own log level defined.
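A usage sketch for the reworked ForPlugin/NewPluginLogger API above; the plugin and child-logger names are illustrative assumptions:

package example

import (
	"github.com/ligato/cn-infra/logging"
	// Importing logrus for its side effect: its init() registers the
	// default logger and registry that ForPlugin relies on.
	_ "github.com/ligato/cn-infra/logging/logrus"
)

func initLoggers() {
	// Plugin-scoped logger; an existing registry entry with this name
	// is reused if one was already created.
	pl := logging.ForPlugin("example-plugin")
	pl.Info("plugin starting")

	// Child logger names are prefixed with the plugin logger name,
	// yielding "example-plugin-resync" here.
	child := pl.NewLogger("-resync")
	child.Debug("resync logger created")
}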
See config file - [example](/logging/logging.conf) to learn how to define it. - - **Note:** initial log level can be set using environmental variable `INITIAL_LOGLVL`. The - variable replaces default-level from configuration file. However, loggers (partial definition) - replace default value set by environmental variable for specific loggers defined. - \ No newline at end of file diff --git a/vendor/github.com/ligato/cn-infra/logging/logmanager/doc.go b/vendor/github.com/ligato/cn-infra/logging/logmanager/doc.go deleted file mode 100644 index 53cf1e605c..0000000000 --- a/vendor/github.com/ligato/cn-infra/logging/logmanager/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package logmanager implements the log manager that allows users to set -// log levels at run-time via a REST API. -package logmanager diff --git a/vendor/github.com/ligato/cn-infra/logging/logmanager/plugin_impl_log_manager.go b/vendor/github.com/ligato/cn-infra/logging/logmanager/plugin_impl_log_manager.go deleted file mode 100644 index 5b561d05e5..0000000000 --- a/vendor/github.com/ligato/cn-infra/logging/logmanager/plugin_impl_log_manager.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logmanager - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" - "github.com/unrolled/render" - - "os" - - "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/logging" - log "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/rpc/rest" -) - -// LoggerData encapsulates parameters of a logger represented as strings. -type LoggerData struct { - Logger string `json:"logger"` - Level string `json:"level"` -} - -// Variable names in logger registry URLs -const ( - loggerVarName = "logger" - levelVarName = "level" -) - -// Plugin allows to manage log levels of the loggers using HTTP. -type Plugin struct { - Deps - *Conf -} - -// Deps groups dependencies injected into the plugin so that they are -// logically separated from other plugin fields. -type Deps struct { - Log logging.PluginLogger //inject - PluginName core.PluginName //inject - config.PluginConfig //inject - - LogRegistry logging.Registry // inject - HTTP rest.HTTPHandlers // inject -} - -// NewConf creates default configuration with InfoLevel & empty loggers. 
-// Suitable also for usage in flavor to programmatically specify default behavior. -func NewConf() *Conf { - return &Conf{ - DefaultLevel: "", - Loggers: []ConfLogger{}, - } -} - -// Conf is a binding that supports to define default log levels for multiple loggers -type Conf struct { - DefaultLevel string `json:"default-level"` - Loggers []ConfLogger `json:"loggers"` -} - -// ConfLogger is configuration of a particular logger. -// Currently we support only logger level. -type ConfLogger struct { - Name string - Level string //debug, info, warning, error, fatal, panic -} - -// Init does nothing -func (lm *Plugin) Init() error { - if lm.PluginConfig != nil { - if lm.Conf == nil { - lm.Conf = NewConf() - } - - _, err := lm.PluginConfig.GetValue(lm.Conf) - if err != nil { - return err - } - lm.Log.Debugf("logs config: %+v", lm.Conf) - - // Handle default log level. Prefer value from environmental variable - defaultLogLvl := os.Getenv("INITIAL_LOGLVL") - if defaultLogLvl == "" { - defaultLogLvl = lm.Conf.DefaultLevel - } - if defaultLogLvl != "" { - if err := lm.LogRegistry.SetLevel("default", defaultLogLvl); err != nil { - lm.Log.Warnf("setting default log level failed: %v", err) - } else { - // All loggers created up to this point were created with initial log level set (defined - // via INITIAL_LOGLVL env. variable with value 'info' by default), so at first, let's set default - // log level for all of them. - for loggerName := range lm.LogRegistry.ListLoggers() { - logger, exists := lm.LogRegistry.Lookup(loggerName) - if !exists { - continue - } - logger.SetLevel(stringToLogLevel(defaultLogLvl)) - } - } - } - - // Handle config file log levels - for _, logCfgEntry := range lm.Conf.Loggers { - // Put log/level entries from configuration file to the registry. - if err := lm.LogRegistry.SetLevel(logCfgEntry.Name, logCfgEntry.Level); err != nil { - // Intentionally just log warn & not propagate the error (it is minor thing to interrupt startup) - lm.Log.Warnf("setting log level %s for logger %s failed: %v", logCfgEntry.Level, - logCfgEntry.Name, err) - } - } - } - - return nil -} - -// AfterInit is called at plugin initialization. It register the following handlers: -// - List all registered loggers: -// > curl -X GET http://localhost:/log/list -// - Set log level for a registered logger: -// > curl -X PUT http://localhost:/log// -func (lm *Plugin) AfterInit() error { - if lm.HTTP != nil { - lm.HTTP.RegisterHTTPHandler(fmt.Sprintf("/log/{%s}/{%s:debug|info|warning|error|fatal|panic}", - loggerVarName, levelVarName), lm.logLevelHandler, "PUT") - lm.HTTP.RegisterHTTPHandler("/log/list", lm.listLoggersHandler, "GET") - } - return nil -} - -// Close is called at plugin cleanup phase. -func (lm *Plugin) Close() error { - return nil -} - -// ListLoggers lists all registered loggers. 
-func (lm *Plugin) listLoggers() []LoggerData { - var loggers []LoggerData - - lgs := lm.LogRegistry.ListLoggers() - for lg, lvl := range lgs { - ld := LoggerData{ - Logger: lg, - Level: lvl, - } - loggers = append(loggers, ld) - } - - return loggers -} - -// setLoggerLogLevel modifies the log level of the all loggers in a plugin -func (lm *Plugin) setLoggerLogLevel(name string, level string) error { - lm.Log.Debugf("SetLogLevel name '%s', level '%s'", name, level) - - return lm.LogRegistry.SetLevel(name, level) -} - -// logLevelHandler processes requests to set log level on loggers in a plugin -func (lm *Plugin) logLevelHandler(formatter *render.Render) http.HandlerFunc { - - return func(w http.ResponseWriter, req *http.Request) { - lm.Log.Infof("Path: %s", req.URL.Path) - vars := mux.Vars(req) - if vars == nil { - formatter.JSON(w, http.StatusNotFound, struct{}{}) - return - } - err := lm.setLoggerLogLevel(vars[loggerVarName], vars[levelVarName]) - if err != nil { - formatter.JSON(w, http.StatusNotFound, - struct{ Error string }{err.Error()}) - return - } - formatter.JSON(w, http.StatusOK, - LoggerData{Logger: vars[loggerVarName], Level: vars[levelVarName]}) - } -} - -// listLoggersHandler processes requests to list all registered loggers -func (lm *Plugin) listLoggersHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - formatter.JSON(w, http.StatusOK, lm.listLoggers()) - } -} - -// convert log level string representation to DebugLevel value -func stringToLogLevel(level string) log.LogLevel { - level = strings.ToLower(level) - switch level { - case "debug": - return log.DebugLevel - case "info": - return log.InfoLevel - case "warn": - return log.WarnLevel - case "error": - return log.ErrorLevel - case "fatal": - return log.FatalLevel - case "panic": - return log.PanicLevel - } - - return log.InfoLevel -} diff --git a/vendor/github.com/ligato/cn-infra/logging/logrus/logger.go b/vendor/github.com/ligato/cn-infra/logging/logrus/logger.go index 31de347059..4823ed2dbb 100644 --- a/vendor/github.com/ligato/cn-infra/logging/logrus/logger.go +++ b/vendor/github.com/ligato/cn-infra/logging/logrus/logger.go @@ -40,8 +40,12 @@ var ( defaultLogger = NewLogger(DefaultLoggerName) ) -// DefaultLogger returns a global Logrus logger. Please notice, that recommended -// approach is to create a custom logger. +func init() { + logging.DefaultLogger = defaultLogger +} + +// DefaultLogger returns a global Logrus logger. +// Note that the recommended approach is to create a custom logger. func DefaultLogger() *Logger { return defaultLogger } @@ -275,8 +279,8 @@ func (logger *Logger) WithField(key string, value interface{}) logging.LogWithLe // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the LogMsg it returns.
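In that spirit, a short sketch of creating a named logger instead of relying on the global default; the logger name and field values are illustrative assumptions:

package example

import (
	"github.com/ligato/cn-infra/logging"
	"github.com/ligato/cn-infra/logging/logrus"
)

func newPluginLogger() logging.Logger {
	// A named logger is registered in the default registry and can have
	// its level managed independently of the global default.
	log := logrus.NewLogger("acl-plugin")
	log.SetLevel(logging.DebugLevel)

	// WithFields returns a LogWithLevel; nothing is emitted until a
	// level method such as Debug is called on it.
	log.WithFields(logging.Fields{"handler": "vppcalls"}).Debug("logger ready")
	return log
}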
-func (logger *Logger) WithFields(fields map[string]interface{}) logging.LogWithLevel { - return logger.withFields(logging.Fields(fields), 1) +func (logger *Logger) WithFields(fields logging.Fields) logging.LogWithLevel { + return logger.withFields(fields, 1) } func (logger *Logger) withFields(fields logging.Fields, depth int) *Entry { diff --git a/vendor/github.com/ligato/cn-infra/logging/logrus/registry.go b/vendor/github.com/ligato/cn-infra/logging/logrus/registry.go index 005488087e..b7a0ac425d 100644 --- a/vendor/github.com/ligato/cn-infra/logging/logrus/registry.go +++ b/vendor/github.com/ligato/cn-infra/logging/logrus/registry.go @@ -26,7 +26,7 @@ import ( ) // DefaultRegistry is a default logging registry -var DefaultRegistry logging.Registry +//var DefaultRegistry logging.Registry var initialLogLvl = logrus.InfoLevel @@ -39,7 +39,7 @@ func init() { defaultLogger.Debugf("initial log level: %v", lvl.String()) } } - DefaultRegistry = NewLogRegistry() + logging.DefaultRegistry = NewLogRegistry() } // NewLogRegistry is a constructor diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/README.md b/vendor/github.com/ligato/cn-infra/messaging/kafka/README.md deleted file mode 100644 index 1f97d26ea6..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Kafka - -The client package provides single purpose clients for publishing -synchronous/asynchronous messages and for consuming selected topics. - -The mux package uses these clients and allows to share their access to kafka brokers -among multiple entities. This package also implements the generic messaging API defined in the parent package. - -## Requirements - -Minimal supported version of kafka is determined by [sarama](https://github.com/Shopify/sarama) -library - Kafka 0.10 and 0.9, although older releases are still likely to work. - -If you don't have kafka installed locally you can use docker image for testing: - ``` -sudo docker run -p 2181:2181 -p 9092:9092 --name kafka --rm \ - --env ADVERTISED_HOST=172.17.0.1 --env ADVERTISED_PORT=9092 spotify/kafka -``` - -# Kafka plugin - -Kafka plugin provides access to kafka brokers. - -**API** - -The plugin's API is documented at the end of [doc.go](doc.go). - -**Configuration** -- Location of the Kafka configuration file can be defined either by command line flag `kafka-config` or -set via `KAFKA_CONFIG` env variable. - -**Status Check** - -- Kafka plugin has a mechanism to periodically check a connection status of the Kafka server. diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/asyncproducer.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/client/asyncproducer.go deleted file mode 100644 index 1a790ff9d1..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/asyncproducer.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "crypto/md5" - "errors" - "fmt" - "sync" - - "github.com/Shopify/sarama" - "github.com/ligato/cn-infra/logging" -) - -// AsyncProducer allows to publish message to kafka using asynchronous API. -// The message using SendMsgToPartition and SendMsgByte function returns do not block. -// The status whether message was sent successfully or not is delivered using channels -// specified in config structure. -type AsyncProducer struct { - logging.Logger - Config *Config - Client sarama.Client - Producer sarama.AsyncProducer - Partition int32 - closed bool - xwg *sync.WaitGroup - closeChannel chan struct{} - sync.Mutex -} - -// NewAsyncProducer returns a new AsyncProducer instance. Producer is created from provided sarama client which can be nil; -// in that case a new client will be created. Also the partitioner is set here. Note: provided sarama client partitioner -// should match the one used in config. -func NewAsyncProducer(config *Config, sClient sarama.Client, partitioner string, wg *sync.WaitGroup) (*AsyncProducer, error) { - if config.Debug { - config.Logger.SetLevel(logging.DebugLevel) - } - - config.Logger.Debug("Entering NewAsyncProducer ...") - if err := config.ValidateAsyncProducerConfig(); err != nil { - return nil, err - } - - // set "RequiredAcks" for producer - if config.RequiredAcks == AcksUnset { - config.RequiredAcks = WaitForLocal - } - err := setProducerRequiredAcks(config) - if err != nil { - return nil, errors.New("invalid RequiredAcks field in config") - } - - // set partitioner - config.SetPartitioner(partitioner) - - // initAsyncProducer object - ap := &AsyncProducer{ - Logger: config.Logger, - Config: config, - Partition: config.Partition, - closed: false, - closeChannel: make(chan struct{}), - } - - // If client is nil, create a new one - if sClient == nil { - localClient, err := NewClient(config, partitioner) - if err != nil { - return nil, err - } - // store local client in syncProducer if it was created here - ap.Client = localClient - sClient = localClient - } - - // init a new asyncproducer using this client - producer, err := sarama.NewAsyncProducerFromClient(sClient) - if err != nil { - return nil, err - } - ap.Producer = producer - - // if there is a "waitgroup" arg then use it - if wg != nil { - ap.xwg = wg - ap.xwg.Add(1) - } - - // if required, start reading from the successes channel - if config.ProducerConfig().Producer.Return.Successes { - go ap.successHandler(ap.Producer.Successes()) - } - - // if required, start reading from the errors channel - if config.ProducerConfig().Producer.Return.Errors { - go ap.errorHandler(ap.Producer.Errors()) - } - - return ap, nil -} - -// SendMsgByte sends an async message to Kafka. 
-func (ref *AsyncProducer) SendMsgByte(topic string, key []byte, msg []byte, metadata interface{}) { - // generate key if none supplied - used by Hash partitioner - if key == nil || len(key) == 0 { - md5Sum := fmt.Sprintf("%x", md5.Sum(msg)) - ref.SendMsgToPartition(topic, ref.Partition, sarama.ByteEncoder([]byte(md5Sum)), sarama.ByteEncoder(msg), metadata) - return - } - ref.SendMsgToPartition(topic, ref.Partition, sarama.ByteEncoder(key), sarama.ByteEncoder(msg), metadata) -} - -// SendMsgToPartition sends an async message to Kafka -func (ref *AsyncProducer) SendMsgToPartition(topic string, partition int32, key Encoder, msg Encoder, metadata interface{}) { - if msg == nil { - return - } - - message := &sarama.ProducerMessage{ - Topic: topic, - Partition: partition, - Key: key, - Value: msg, - Metadata: metadata, - } - - ref.Producer.Input() <- message - - ref.Debugf("message sent: %s", message) - - return -} - -// Close closes the client and producer -func (ref *AsyncProducer) Close(async ...bool) error { - var err error - defer func() { - if ref.closed { - ref.Unlock() - return - } - ref.closed = true - close(ref.closeChannel) - - // decrement external wait group - if ref.xwg != nil { - ref.xwg.Done() - } - ref.Unlock() - }() - - ref.Lock() - if ref.closed { - return nil - } - - if async != nil && len(async) > 0 { - ref.Debug("async close") - ref.Producer.AsyncClose() - } else { - ref.Debug("sync close") - ref.Producer.Close() - } - if err != nil { - ref.Errorf("asyncProducer close error: %v", err) - return err - } - if ref.Client != nil && !ref.Client.Closed() { - err = ref.Client.Close() - if err != nil { - ref.Errorf("client close error: %v", err) - return err - } - } - - return nil -} - -// successHandler handles success messages -func (ref *AsyncProducer) successHandler(in <-chan *sarama.ProducerMessage) { - ref.Debug("starting success handler ...") - for { - select { - case <-ref.closeChannel: - ref.Debug("success handler exited ...") - return - case msg := <-in: - if msg == nil { - continue - } - ref.Debugf("Message is stored in topic(%s)/partition(%d)/offset(%d)\n", msg.Topic, msg.Partition, msg.Offset) - pmsg := &ProducerMessage{ - Topic: msg.Topic, - Key: msg.Key, - Value: msg.Value, - Metadata: msg.Metadata, - Offset: msg.Offset, - Partition: msg.Partition, - } - ref.Config.SuccessChan <- pmsg - } - } -} - -// errorHandler handles error messages -func (ref *AsyncProducer) errorHandler(in <-chan *sarama.ProducerError) { - ref.Debug("starting error handler ...") - for { - select { - case <-ref.closeChannel: - ref.Debug("error handler exited ...") - return - case perr := <-in: - if perr == nil { - continue - } - - msg := perr.Msg - err := perr.Err - pmsg := &ProducerMessage{ - Topic: msg.Topic, - Key: msg.Key, - Value: msg.Value, - Metadata: msg.Metadata, - Offset: msg.Offset, - Partition: msg.Partition, - } - perr2 := &ProducerError{ - ProducerMessage: pmsg, - Err: err, - } - val, _ := msg.Value.Encode() - ref.Errorf("message %s errored in topic(%s)/partition(%d)/offset(%d)\n", string(val), pmsg.Topic, pmsg.Partition, pmsg.Offset) - ref.Errorf("message error: %v", perr.Err) - ref.Config.ErrorChan <- perr2 - } - } -} - -// IsClosed returns the "closed" status -func (ref *AsyncProducer) IsClosed() bool { - ref.Lock() - defer ref.Unlock() - return ref.closed -} - -// WaitForClose returns when the producer is closed -func (ref *AsyncProducer) WaitForClose() { - <-ref.closeChannel -} - -// GetCloseChannel returns a channel that is closed on asyncProducer cleanup -func (ref 
*AsyncProducer) GetCloseChannel() <-chan struct{} { - return ref.closeChannel -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/config.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/client/config.go deleted file mode 100644 index 919d1271d8..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/config.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "context" - "errors" - "strings" - - "crypto/tls" - "github.com/Shopify/sarama" - "github.com/bsm/sarama-cluster" - "github.com/ligato/cn-infra/logging" -) - -// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements -// it must see before responding. Any of the constants defined here are valid except AcksUnset. -type RequiredAcks int16 - -const ( - // AcksUnset indicates that no valid value has been set - AcksUnset RequiredAcks = -32768 - // NoResponse doesn't send any response, the TCP ACK is all you get. - NoResponse RequiredAcks = 0 - // WaitForLocal waits for only the local commit to succeed before responding. - WaitForLocal RequiredAcks = 1 - // WaitForAll waits for all replicas to commit before responding. - WaitForAll RequiredAcks = -1 -) - -// Partitioner schemes -const ( - // Hash scheme (messages with the same key always end up on the same partition) - Hash = "hash" - // Random scheme (random partition is always used) - Random = "random" - // Manual scheme (partitions are manually set in the provided message's partition field) - Manual = "manual" -) - -// Config struct provides the configuration for a Producer (Sync or Async) and Consumer. -type Config struct { - logging.Logger - // Config extends the sarama-cluster.Config with the kafkaclient namespace - *cluster.Config - // Context Package carries deadlines, cancelation signals, and other values. - // see: http://golang.org/x/net/context - Context context.Context - // Cancel is a function that can be call, e.g. config.Cancel(), to cancel and close - // the producer/consumer - Cancel context.CancelFunc - // Brokers contains "{domain:port}" array of Kafka brokers. - // This list of brokers is used by the kafkaclient to determine the 'lead' broker for each topic - // and the 'lead' consumer for each topic. If only one broker is supplied then it will be used to - // communicate with the other brokers. - // REQUIRED: PRODUCER AND CONSUMER. - Brokers []string - // GroupID contains the name of the consumer's group. - // REQUIRED: CONSUMER. - GroupID string - // Debug determines if debug code should be 'turned-on'. - // DEFAULT: false. OPTIONAL. - Debug bool - // Topics contains the topics that a consumer should retrieve messages for. - // REQUIRED: CONSUMER. - Topics []string - // Partition is the partition. Used when configuring partitions manually. - Partition int32 - // Partitioner is the method used to determine a topic's partition. - // REQUIRED: PRODUCER. 
DEFAULT: HASH - Partitioner sarama.PartitionerConstructor - // InitialOffset indicates the initial offset that should be used when a consumer is initialized and begins reading - // the Kafka message log for the topic. If the offset was previously committed then the committed offset is used - // rather than the initial offset. - // REQUIRED: CONSUMER - InitialOffset int64 - // RequiredAcks is the level of acknowledgement reliability needed from the broker - // REQUIRED: PRODUCER. DEFAULT(Async) WaitForLocal DEFAULT(Sync) WaitForAll - RequiredAcks RequiredAcks - // RecvNotification indicates that a Consumer return "Notification" messages after it has rebalanced. - // REQUIRED: CONSUMER. DEFAULT: false. - RecvNotification bool - // NotificationChan function called when a "Notification" message is received by a consumer. - // REQUIRED: CONSUMER if 'RecvNotification=true' - RecvNotificationChan chan *cluster.Notification - // RecvError indicates that "receive" errors should not be ignored and should be returned to the consumer. - // REQUIRED: CONSUMER. DEFAULT: true. - RecvError bool - // RecvErrorChan channel is for delivery of "Error" messages received by the consumer. - // REQUIRED: CONSUMER if 'RecvError=true' - RecvErrorChan chan error - // MessageChan channel is used for delivery of consumer messages. - // REQUIRED: CONSUMER - RecvMessageChan chan *ConsumerMessage - // SendSuccess indicates that the Async Producer should return "Success" messages when a message - // has been successfully received by the Kafka. - // REQUIRED: CONSUMER. DEFAULT: false. - SendSuccess bool - // SuccessChan is used for delivery of message when a "Success" is returned by Async Producer. - // REQUIRED: PRODUCER if 'SendSuccess=true' - SuccessChan chan *ProducerMessage - // SendError indicates that an Async Producer should return "Error" messages when a message transmission to Kafka - // failed. - // REQUIRED: CONSUMER. DEFAULT: true. - SendError bool - // ErrorChan is used for delivery of "Error" message if an error is returned by Async Producer. - // REQUIRED: PRODUCER if 'SendError=true' - ErrorChan chan *ProducerError -} - -// NewConfig return a new Config object. 
-func NewConfig(log logging.Logger) *Config { - - cfg := &Config{ - Logger: log, - Config: cluster.NewConfig(), - Partition: -1, - Partitioner: sarama.NewHashPartitioner, - RequiredAcks: AcksUnset, - } - - return cfg -} - -// SetBrokers sets the Config.Brokers field -func (ref *Config) SetBrokers(brokers ...string) { - ref.Brokers = brokers -} - -// SetTopics sets the Config.Topics field -func (ref *Config) SetTopics(topics string) { - ref.Topics = strings.Split(topics, ",") -} - -// SetDebug sets the Config.Debug field -func (ref *Config) SetDebug(val bool) { - if val { - ref.Debug = val - sarama.Logger = ref.Logger - ref.SetLevel(logging.DebugLevel) - } else { - ref.Debug = val - } -} - -// SetGroup sets the Config.GroupID field -func (ref *Config) SetGroup(id string) { - ref.GroupID = id -} - -// SetAcks sets the Config.RequiredAcks field -func (ref *Config) SetAcks(acks RequiredAcks) { - ref.RequiredAcks = acks -} - -// SetInitialOffset sets the Config.InitialOffset field -func (ref *Config) SetInitialOffset(offset int64) { - ref.InitialOffset = offset -} - -// SetSendSuccess sets the Config.SendSuccess field -func (ref *Config) SetSendSuccess(val bool) { - ref.SendSuccess = val -} - -// SetSendError sets the Config.SendError field -func (ref *Config) SetSendError(val bool) { - ref.SendError = val -} - -// SetRecvNotification sets the Config.RecvNotification field -func (ref *Config) SetRecvNotification(val bool) { - ref.RecvNotification = val -} - -// SetRecvError sets the Config.RecvError field -func (ref *Config) SetRecvError(val bool) { - ref.RecvError = val -} - -// SetSuccessChan sets the Config.SuccessChan field -func (ref *Config) SetSuccessChan(val chan *ProducerMessage) { - ref.SuccessChan = val -} - -// SetErrorChan sets the Config.ErrorChan field -func (ref *Config) SetErrorChan(val chan *ProducerError) { - ref.ErrorChan = val -} - -// SetRecvNotificationChan sets the Config.RecvNotificationChan field -func (ref *Config) SetRecvNotificationChan(val chan *cluster.Notification) { - ref.RecvNotificationChan = val -} - -// SetRecvErrorChan sets the Config.RecvErrorChan field -func (ref *Config) SetRecvErrorChan(val chan error) { - ref.RecvErrorChan = val -} - -// SetRecvMessageChan sets the Config.RecvMessageChan field -func (ref *Config) SetRecvMessageChan(val chan *ConsumerMessage) { - ref.RecvMessageChan = val -} - -// ProducerConfig sets the Config.ProducerConfig field -func (ref *Config) ProducerConfig() *sarama.Config { - return &ref.Config.Config -} - -// ConsumerConfig sets the Config.ConsumerConfig field -func (ref *Config) ConsumerConfig() *cluster.Config { - return ref.Config -} - -// SetPartition sets the Config.SetPartition field -func (ref *Config) SetPartition(val int32) { - ref.Partition = val -} - -// SetPartitioner sets the Config.SetPartitioner field -func (ref *Config) SetPartitioner(val string) { - switch val { - default: - ref.Errorf("Invalid partitioner %s - defaulting to ''", val) - fallthrough - case "": - if ref.Partition >= 0 { - ref.Partitioner = sarama.NewManualPartitioner - } else { - ref.Partitioner = sarama.NewHashPartitioner - } - case Hash: - ref.Partitioner = sarama.NewHashPartitioner - ref.Partition = -1 - case Random: - ref.Partitioner = sarama.NewRandomPartitioner - ref.Partition = -1 - case Manual: - ref.Partitioner = sarama.NewManualPartitioner - if ref.Partition < 0 { - ref.Infof("Invalid partition %d - defaulting to 0", ref.Partition) - ref.Partition = 0 - } - } -} - -// SetTLS sets the TLS configuration -func (ref *Config) 
SetTLS(tlsConfig *tls.Config) (err error) { - ref.Net.TLS.Enable = true - ref.Net.TLS.Config = tlsConfig - - return nil -} - -// ValidateAsyncProducerConfig validates config for an Async Producer -func (ref *Config) ValidateAsyncProducerConfig() error { - if ref.Brokers == nil { - return errors.New("invalid Brokers - one or more brokers must be specified") - } - if ref.SendSuccess && ref.SuccessChan == nil { - return errors.New("success channel not specified") - } - if ref.SendError && ref.ErrorChan == nil { - return errors.New("error channel not specified") - } - return ref.ProducerConfig().Validate() -} - -// ValidateSyncProducerConfig validates config for a Sync Producer -func (ref *Config) ValidateSyncProducerConfig() error { - if ref.Brokers == nil { - return errors.New("invalid Brokers - one or more brokers must be specified") - } - return ref.ProducerConfig().Validate() -} - -// ValidateConsumerConfig validates config for Consumer -func (ref *Config) ValidateConsumerConfig() error { - if ref.Brokers == nil { - return errors.New("invalid Brokers - one or more brokers must be specified") - } - if ref.GroupID == "" { - return errors.New("invalid GroupID - no GroupID specified") - } - if ref.RecvNotification && ref.RecvNotificationChan == nil { - return errors.New("notification channel not specified") - } - if ref.RecvError && ref.RecvErrorChan == nil { - return errors.New("error channel not specified") - } - if ref.RecvMessageChan == nil { - return errors.New("recvMessageChan not specified") - } - return ref.ConsumerConfig().Validate() -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/consumer.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/client/consumer.go deleted file mode 100644 index 2741517b18..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/consumer.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "sync" - "time" - - "github.com/Shopify/sarama" - "github.com/bsm/sarama-cluster" - "github.com/ligato/cn-infra/logging" -) - -// clusterConsumer defines an interface that allows to mock the implementation of -// bsm/sarama-cluster consumer. -type clusterConsumer interface { - Notifications() <-chan *cluster.Notification - Errors() <-chan error - Messages() <-chan *sarama.ConsumerMessage - Close() (err error) - MarkOffset(msg *sarama.ConsumerMessage, metadata string) - MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) - Subscriptions() map[string][]int32 - CommitOffsets() error -} - -// Consumer allows to consume message belonging to specified set of kafka -// topics. -type Consumer struct { - logging.Logger - Config *Config - SConsumer sarama.Consumer - Consumer clusterConsumer - closed bool - xwg *sync.WaitGroup - closeChannel chan struct{} - sync.Mutex -} - -// NewConsumer returns a Consumer instance. 
If startHandlers is set to true, reading of messages, errors -// and notifications is started using new consumer. Otherwise, only instance is returned -func NewConsumer(config *Config, wg *sync.WaitGroup) (*Consumer, error) { - if config.Debug { - config.Logger.SetLevel(logging.DebugLevel) - } - config.Logger.Debug("entering NewConsumer ...") - if err := config.ValidateConsumerConfig(); err != nil { - return nil, err - } - config.Logger.Debugf("Consumer config: %#v", config) - - // set consumer config params - config.ConsumerConfig().Group.Return.Notifications = config.RecvNotification - config.ProducerConfig().Consumer.Return.Errors = config.RecvError - config.ConsumerConfig().Consumer.Offsets.Initial = config.InitialOffset - - cClient, err := cluster.NewClient(config.Brokers, config.Config) - if err != nil { - return nil, err - } - - config.Logger.Debug("new client created successfully ...") - - consumer, err := cluster.NewConsumerFromClient(cClient, config.GroupID, config.Topics) - if err != nil { - return nil, err - } - - sConsumer, err := sarama.NewConsumerFromClient(cClient) - if err != nil { - return nil, err - } - - csmr := &Consumer{ - Logger: config.Logger, - Config: config, - SConsumer: sConsumer, - Consumer: consumer, - closed: false, - closeChannel: make(chan struct{}), - } - - // if there is a "waitgroup" arg then use it - if wg != nil { - csmr.xwg = wg - csmr.xwg.Add(1) - } - - return csmr, nil -} - -// StartConsumerHandlers starts required handlers using bsm/sarama consumer. Used when partitioner set in config is -// non-manual -func (ref *Consumer) StartConsumerHandlers() { - config := ref.Config - config.Logger.Info("Starting message handlers for new consumer ...") - // if required, start reading from the notifications channel - if config.ConsumerConfig().Group.Return.Notifications { - go ref.notificationHandler(ref.Consumer.Notifications()) - } - - // if required, start reading from the errors channel - if config.ProducerConfig().Consumer.Return.Errors { - go ref.errorHandler(ref.Consumer.Errors()) - } - - // start the message handler - go ref.messageHandler(ref.Consumer.Messages()) -} - -// StartConsumerManualHandlers starts required handlers using sarama partition consumer. 
Used when partitioner set in config is -// manual -func (ref *Consumer) StartConsumerManualHandlers(partitionConsumer sarama.PartitionConsumer) { - config := ref.Config - config.Logger.Info("Starting message handlers for new manual consumer ...") - - // if required, start reading from the errors channel - if config.ProducerConfig().Consumer.Return.Errors { - go ref.manualErrorHandler(partitionConsumer.Errors()) - } - - // start the message handler - go ref.messageHandler(partitionConsumer.Messages()) -} - -// NewClient initializes new sarama client instance from provided config and with defined partitioner -func NewClient(config *Config, partitioner string) (sarama.Client, error) { - config.Logger.Debug("Creating new consumer") - if err := config.ValidateAsyncProducerConfig(); err != nil { - return nil, err - } - - config.SetSendSuccess(true) - config.SetSuccessChan(make(chan *ProducerMessage)) - config.SetSendError(true) - config.SetErrorChan(make(chan *ProducerError)) - // Required acks will be set in sync/async producer - config.RequiredAcks = AcksUnset - - // set other Producer config params - config.ProducerConfig().Producer.Return.Successes = config.SendSuccess - config.ProducerConfig().Producer.Return.Errors = config.SendError - - // set partitioner - switch partitioner { - case Hash: - config.ProducerConfig().Producer.Partitioner = sarama.NewHashPartitioner - case Random: - config.ProducerConfig().Producer.Partitioner = sarama.NewRandomPartitioner - case Manual: - config.ProducerConfig().Producer.Partitioner = sarama.NewManualPartitioner - default: - // Hash partitioner is set as default - config.ProducerConfig().Producer.Partitioner = sarama.NewHashPartitioner - } - - config.Logger.Debugf("AsyncProducer config: %#v", config) - - sClient, err := sarama.NewClient(config.Brokers, &config.Config.Config) - if err != nil { - fmt.Printf("Error creating consumer client %v", err) - return nil, err - } - - return sClient, nil -} - -// Close closes the client and consumer -func (ref *Consumer) Close() error { - ref.Debug("entering consumer close ...") - defer func() { - ref.Debug("running defer ...") - if ref.closed { - ref.Debug("consumer already closed ...") - ref.Unlock() - return - } - ref.Debug("setting closed ...") - ref.closed = true - ref.Debug("closing closeChannel channel ...") - close(ref.closeChannel) - - if ref.xwg != nil { - ref.xwg.Done() - } - ref.Unlock() - }() - - ref.Debug("about to lock ...") - ref.Lock() - ref.Debug("locked ...") - if ref.closed { - return nil - } - - // close consumer - ref.Debug("calling consumer close ....") - err := ref.Consumer.Close() - if err != nil { - ref.Errorf("consumer close error: %v", err) - return err - } - ref.Debug("consumer closed") - - return nil -} - -// IsClosed returns the "closed" status -func (ref *Consumer) IsClosed() bool { - return ref.closed -} - -// WaitForClose waits for the consumer to close -func (ref *Consumer) WaitForClose() { - <-ref.closeChannel - ref.Debug("exiting WaitForClose ...") -} - -// MarkOffset marks the provided message as processed, alongside a metadata string -// that represents the state of the partition consumer at that point in time. The -// metadata string can be used by another consumer to restore that state, so it -// can resume consumption. -// -// Note: calling MarkOffset does not necessarily commit the offset to the backend -// store immediately for efficiency reasons, and it may never be committed if -// your application crashes. 
This means that you may end up processing the same -// message twice, and your processing should ideally be idempotent. -func (ref *Consumer) MarkOffset(msg *ConsumerMessage, metadata string) { - - ref.Consumer.MarkOffset(&sarama.ConsumerMessage{ - Topic: msg.Topic, - Partition: msg.Partition, - Offset: msg.Offset, - }, metadata) -} - -// MarkPartitionOffset marks an offset of the provided topic/partition as processed. -// See MarkOffset for additional explanation. -func (ref *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) { - ref.Consumer.MarkPartitionOffset(topic, partition, offset, metadata) -} - -// Subscriptions returns the consumed topics and partitions -func (ref *Consumer) Subscriptions() map[string][]int32 { - return ref.Consumer.Subscriptions() -} - -// CommitOffsets manually commits marked offsets -func (ref *Consumer) CommitOffsets() error { - return ref.Consumer.CommitOffsets() -} - -// PrintNotification print the topics and partitions -func (ref *Consumer) PrintNotification(note map[string][]int32) { - for k, v := range note { - fmt.Printf(" Topic: %s\n", k) - fmt.Printf(" Partitions: %v\n", v) - } -} - -// messageHandler processes each incoming message -func (ref *Consumer) messageHandler(in <-chan *sarama.ConsumerMessage) { - ref.Debug("messageHandler started ...") - var prevValue []byte - - for { - select { - case msg := <-in: - if msg == nil { - continue - } - consumerMsg := &ConsumerMessage{ - Key: msg.Key, - Value: msg.Value, - PrevValue: prevValue, - Topic: msg.Topic, - Partition: msg.Partition, - Offset: msg.Offset, - Timestamp: msg.Timestamp, - } - // Store value as previous for the next iteration - prevValue = consumerMsg.Value - select { - case ref.Config.RecvMessageChan <- consumerMsg: - case <-time.After(1 * time.Second): - ref.Warn("Failed to deliver a message") - } - case <-ref.closeChannel: - ref.Debug("Canceling message handler") - return - } - } -} - -// manualErrorHandler processes each error message for partition consumer -func (ref *Consumer) manualErrorHandler(in <-chan *sarama.ConsumerError) { - ref.Debug("errorHandler started ...") - for { - select { - case err, more := <-in: - if more { - ref.Errorf("message error: %T, %v", err, err) - ref.Config.RecvErrorChan <- err - } - case <-ref.closeChannel: - ref.Debug("Canceling error handler") - return - } - } -} - -// errorHandler processes each error message -func (ref *Consumer) errorHandler(in <-chan error) { - ref.Debug("errorHandler started ...") - for { - select { - case err, more := <-in: - if more { - ref.Errorf("message error: %T, %v", err, err) - ref.Config.RecvErrorChan <- err - } - case <-ref.closeChannel: - ref.Debug("Canceling error handler") - return - } - } -} - -// NotificationHandler processes each message received when the consumer is rebalanced -func (ref *Consumer) notificationHandler(in <-chan *cluster.Notification) { - ref.Debug("NotificationHandler started ...") - - for { - select { - case note := <-in: - ref.Config.RecvNotificationChan <- note - case <-ref.closeChannel: - ref.Debug("Canceling notification handler") - return - } - } -} - -// GetCloseChannel returns a channel that is closed on asyncProducer cleanup -func (ref *Consumer) GetCloseChannel() <-chan struct{} { - return ref.closeChannel -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/doc.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/client/doc.go deleted file mode 100644 index a75cf2ae9c..0000000000 --- 
a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package client implements the synchronous and asynchronous kafka Producers -// and the kafka Consumer. -package client diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/messages.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/client/messages.go deleted file mode 100644 index f7309fc81d..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/messages.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "time" - - "github.com/Shopify/sarama" - "github.com/golang/protobuf/proto" - "github.com/ligato/cn-infra/db/keyval" -) - -// Encoder defines an interface that is used as argument of producer functions. -// It wraps the sarama.Encoder -type Encoder interface { - sarama.Encoder -} - -// ConsumerMessage encapsulates a Kafka message returned by the consumer. -type ConsumerMessage struct { - Key, Value, PrevValue []byte - Topic string - Partition int32 - Offset int64 - Timestamp time.Time -} - -// GetTopic returns the topic associated with the message -func (cm *ConsumerMessage) GetTopic() string { - return cm.Topic -} - -// GetPartition returns the partition associated with the message -func (cm *ConsumerMessage) GetPartition() int32 { - return cm.Partition -} - -// GetOffset returns the offset associated with the message -func (cm *ConsumerMessage) GetOffset() int64 { - return cm.Offset -} - -// GetKey returns the key associated with the message. -func (cm *ConsumerMessage) GetKey() string { - return string(cm.Key) -} - -// GetValue returns the value associated with the message. -func (cm *ConsumerMessage) GetValue() []byte { - return cm.Value -} - -// GetPrevValue returns the previous value associated with the message. -func (cm *ConsumerMessage) GetPrevValue() []byte { - return cm.PrevValue -} - -// ProtoConsumerMessage encapsulates a Kafka message returned by the consumer and provides means -// to unmarshal the value into proto.Message. 
-type ProtoConsumerMessage struct { - *ConsumerMessage - serializer keyval.Serializer -} - -// NewProtoConsumerMessage creates new instance of ProtoConsumerMessage -func NewProtoConsumerMessage(msg *ConsumerMessage, serializer keyval.Serializer) *ProtoConsumerMessage { - return &ProtoConsumerMessage{msg, serializer} -} - -// GetTopic returns the topic associated with the message. -func (cm *ProtoConsumerMessage) GetTopic() string { - return cm.Topic -} - -// GetPartition returns the partition associated with the message. -func (cm *ProtoConsumerMessage) GetPartition() int32 { - return cm.Partition -} - -// GetOffset returns the offset associated with the message. -func (cm *ProtoConsumerMessage) GetOffset() int64 { - return cm.Offset -} - -// GetKey returns the key associated with the message. -func (cm *ProtoConsumerMessage) GetKey() string { - return string(cm.Key) -} - -// GetValue returns the value associated with the message. -func (cm *ProtoConsumerMessage) GetValue(msg proto.Message) error { - err := cm.serializer.Unmarshal(cm.ConsumerMessage.GetValue(), msg) - if err != nil { - return err - } - return nil -} - -// GetPrevValue returns the previous value associated with the latest message. -func (cm *ProtoConsumerMessage) GetPrevValue(msg proto.Message) (prevValueExist bool, err error) { - prevVal := cm.ConsumerMessage.GetPrevValue() - if prevVal == nil { - return false, nil - } - err = cm.serializer.Unmarshal(prevVal, msg) - if err != nil { - return true, err - } - return true, nil -} - -// ProducerMessage is the collection of elements passed to the Producer in order to send a message. -type ProducerMessage struct { - // The Kafka topic for this message. - Topic string - // The partitioning key for this message. Pre-existing Encoders include - // StringEncoder and ByteEncoder. - Key Encoder - // The actual message to store in Kafka. Pre-existing Encoders include - // StringEncoder and ByteEncoder. - Value Encoder - - // This field is used to hold arbitrary data you wish to include so it - // will be available when receiving on the Successes and Errors channels. - // Sarama completely ignores this field and is only to be used for - // pass-through data. - Metadata interface{} - - // Below this point are filled in by the producer as the message is processed - - // Offset is the offset of the message stored on the broker. This is only - // guaranteed to be defined if the message was successfully delivered and - // RequiredAcks is not NoResponse. - Offset int64 - // Partition is the partition that the message was sent to. This is only - // guaranteed to be defined if the message was successfully delivered. - Partition int32 -} - -// GetTopic returns the topic associated with the message. -func (pm *ProducerMessage) GetTopic() string { - return pm.Topic -} - -// GetPartition returns the partition associated with the message. -func (pm *ProducerMessage) GetPartition() int32 { - return pm.Partition -} - -// GetOffset returns the offset associated with the message. -func (pm *ProducerMessage) GetOffset() int64 { - return pm.Offset -} - -// GetKey returns the key associated with the message. -func (pm *ProducerMessage) GetKey() string { - key, _ := pm.Key.Encode() - return string(key) -} - -// GetValue returns the content of the message. 
-func (pm *ProducerMessage) GetValue() []byte { - val, _ := pm.Value.Encode() - return val -} - -// GetPrevValue returns nil for the producer -func (pm *ProducerMessage) GetPrevValue() []byte { - return nil -} - -func (pm *ProducerMessage) String() string { - var meta string - switch t := pm.Metadata.(type) { - default: - meta = fmt.Sprintf("unexpected type %T", t) // %T prints whatever type t has - case string: - meta = t - case *string: - meta = *t - case []byte: - meta = string(t) - case bool: - meta = fmt.Sprintf("%t", t) // t has type bool - case int: - meta = fmt.Sprintf("%d", t) // t has type int - case *bool: - meta = fmt.Sprintf("%t", *t) // t has type *bool - case *int: - meta = fmt.Sprintf("%d", *t) // t has type *int - } - - key, _ := pm.Key.Encode() - val, _ := pm.Value.Encode() - - return fmt.Sprintf("ProducerMessage - Topic: %s, Key: %s, Value: %s, Meta: %v, Offset: %d, Partition: %d\n", pm.Topic, string(key), string(val), meta, pm.Offset, pm.Partition) -} - -// ProducerError is the type of error generated when the producer fails to deliver a message. -// It contains the original ProducerMessage as well as the actual error value. -type ProducerError struct { - *ProducerMessage - Err error -} - -func (ref *ProducerError) Error() error { - return ref.Err -} - -func (ref *ProducerError) String() string { - return fmt.Sprintf("ProducerError: %s, error: %v\n", ref.ProducerMessage, ref.Err.Error()) -} - -// ProtoProducerMessage is wrapper of a producer message that simplify work with proto-modelled data. -type ProtoProducerMessage struct { - *ProducerMessage - Serializer keyval.Serializer -} - -// GetTopic returns the topic associated with the message. -func (ppm *ProtoProducerMessage) GetTopic() string { - return ppm.Topic -} - -// GetPartition returns the partition associated with the message. -func (ppm *ProtoProducerMessage) GetPartition() int32 { - return ppm.Partition -} - -// GetOffset returns the offset associated with the message. -func (ppm *ProtoProducerMessage) GetOffset() int64 { - return ppm.Offset -} - -// GetKey returns the key associated with the message. -func (ppm *ProtoProducerMessage) GetKey() string { - key, _ := ppm.Key.Encode() - return string(key) -} - -// GetValue unmarshalls the content of the msg into provided structure. -func (ppm *ProtoProducerMessage) GetValue(msg proto.Message) error { - err := ppm.Serializer.Unmarshal(ppm.ProducerMessage.GetValue(), msg) - if err != nil { - return err - } - return nil -} - -// GetPrevValue for producer returns false (value does not exist) -func (ppm *ProtoProducerMessage) GetPrevValue(msg proto.Message) (prevValueExist bool, err error) { - return false, nil -} - -// ProtoProducerMessageErr represents a proto-modelled message that was not published successfully. -type ProtoProducerMessageErr struct { - *ProtoProducerMessage - Err error -} - -func (pme *ProtoProducerMessageErr) Error() error { - return pme.Err -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/mocks.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/client/mocks.go deleted file mode 100644 index c5bb347035..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/mocks.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "github.com/Shopify/sarama" - "github.com/Shopify/sarama/mocks" - "github.com/bsm/sarama-cluster" - "github.com/ligato/cn-infra/logging/logrus" -) - -type clusterConsumerMock struct { - notifCh chan *cluster.Notification - errCh chan error - consumer sarama.Consumer - partitionConsumer sarama.PartitionConsumer -} - -type saramaClientMock struct { -} - -// GetAsyncProducerMock returns mocked implementation of async producer that doesn't -// need connection to Kafka broker and can be used for testing purposes. -func GetAsyncProducerMock(t mocks.ErrorReporter) (*AsyncProducer, *mocks.AsyncProducer) { - saramaCfg := sarama.NewConfig() - saramaCfg.Producer.Return.Successes = true - mock := mocks.NewAsyncProducer(t, saramaCfg) - - cfg := NewConfig(logrus.DefaultLogger()) - cfg.SetSendSuccess(true) - cfg.SetSuccessChan(make(chan *ProducerMessage, 1)) - ap := AsyncProducer{Logger: logrus.DefaultLogger(), Config: cfg, Producer: mock, closeChannel: make(chan struct{}), Client: &saramaClientMock{}} - go ap.successHandler(mock.Successes()) - - return &ap, mock -} - -// GetSyncProducerMock returns mocked implementation of sync producer that doesn't need -// connection to Kafka broker and can be used for testing purposes. -func GetSyncProducerMock(t mocks.ErrorReporter) (*SyncProducer, *mocks.SyncProducer) { - saramaCfg := sarama.NewConfig() - saramaCfg.Producer.Return.Successes = true - mock := mocks.NewSyncProducer(t, saramaCfg) - - cfg := NewConfig(logrus.DefaultLogger()) - ap := SyncProducer{Logger: logrus.DefaultLogger(), Config: cfg, Producer: mock, closeChannel: make(chan struct{}), Client: &saramaClientMock{}} - - return &ap, mock -} - -// GetConsumerMock returns mocked implementation of consumer that doesn't need connection -// to kafka cluster. 
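
The mock constructors in this file were the usual way to unit-test publishing code without a broker. A sketch using the sync variant (topic and payload are placeholders; ExpectSendMessageAndSucceed is sarama's mock-expectation API):

	func TestSendMsg(t *testing.T) {
		syncP, mock := client.GetSyncProducerMock(t)
		defer syncP.Close()
		mock.ExpectSendMessageAndSucceed() // queue one successful send
		if _, err := syncP.SendMsgByte("test-topic", []byte("key"), []byte("value")); err != nil {
			t.Fatal(err)
		}
	}
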
-func GetConsumerMock(t mocks.ErrorReporter) *Consumer { - cfg := NewConfig(logrus.DefaultLogger()) - ap := Consumer{ - Logger: logrus.DefaultLogger(), - Config: cfg, - Consumer: newClusterConsumerMock(t), - closeChannel: make(chan struct{}), - } - - return &ap -} - -func newClusterConsumerMock(t mocks.ErrorReporter) *clusterConsumerMock { - cfg := sarama.NewConfig() - mockSaramaConsumer := mocks.NewConsumer(t, cfg) - cl := &clusterConsumerMock{ - notifCh: make(chan *cluster.Notification), - errCh: make(chan error), - consumer: mockSaramaConsumer, - } - mockSaramaConsumer.ExpectConsumePartition("topic", 0, sarama.OffsetOldest) - - cl.partitionConsumer, _ = cl.consumer.ConsumePartition("topic", 0, sarama.OffsetOldest) - - return cl -} - -func (c *clusterConsumerMock) Notifications() <-chan *cluster.Notification { - return c.notifCh -} - -func (c *clusterConsumerMock) Errors() <-chan error { - return c.errCh -} - -func (c *clusterConsumerMock) Messages() <-chan *sarama.ConsumerMessage { - return c.partitionConsumer.Messages() -} - -func (c *clusterConsumerMock) Close() (err error) { - close(c.notifCh) - c.partitionConsumer.Close() - c.consumer.Close() - return nil -} - -func (c *clusterConsumerMock) MarkOffset(msg *sarama.ConsumerMessage, metadata string) { - -} - -func (c *clusterConsumerMock) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) { - -} - -func (c *clusterConsumerMock) Subscriptions() map[string][]int32 { - return map[string][]int32{} -} - -func (c *clusterConsumerMock) CommitOffsets() error { - return nil -} - -func (cl *saramaClientMock) Config() *sarama.Config { - return nil -} - -func (cl *saramaClientMock) Brokers() []*sarama.Broker { - return nil -} - -func (cl *saramaClientMock) Topics() ([]string, error) { - return nil, nil -} - -func (cl *saramaClientMock) Partitions(topic string) ([]int32, error) { - return nil, nil -} - -func (cl *saramaClientMock) WritablePartitions(topic string) ([]int32, error) { - return nil, nil -} - -func (cl *saramaClientMock) Leader(topic string, partitionID int32) (*sarama.Broker, error) { - return nil, nil -} - -func (cl *saramaClientMock) Replicas(topic string, partitionID int32) ([]int32, error) { - return nil, nil -} - -func (cl *saramaClientMock) RefreshMetadata(topics ...string) error { - return nil -} - -func (cl *saramaClientMock) GetOffset(topic string, partitionID int32, time int64) (int64, error) { - return 0, nil -} - -func (cl *saramaClientMock) Coordinator(consumerGroup string) (*sarama.Broker, error) { - return nil, nil -} - -func (cl *saramaClientMock) RefreshCoordinator(consumerGroup string) error { - return nil -} - -func (cl *saramaClientMock) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { - return nil, nil -} - -func (cl *saramaClientMock) Close() error { - return nil -} - -func (cl *saramaClientMock) Closed() bool { - return false -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/syncproducer.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/client/syncproducer.go deleted file mode 100644 index 8b8fa0bc96..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/client/syncproducer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "crypto/md5" - "errors" - "fmt" - "sync" - - "github.com/Shopify/sarama" - "github.com/ligato/cn-infra/logging" -) - -// SyncProducer allows to publish messages to kafka using synchronous API. -type SyncProducer struct { - logging.Logger - Config *Config - Client sarama.Client - Producer sarama.SyncProducer - Partition int32 - closed bool - xwg *sync.WaitGroup - closeChannel chan struct{} - sync.Mutex -} - -// NewSyncProducer returns a new SyncProducer instance. Producer is created from provided sarama client which can be nil; -// in that case, a new client is created. Also the partitioner is set here. Note: provided sarama client partitioner -// should match the one used in config. -func NewSyncProducer(config *Config, sClient sarama.Client, partitioner string, wg *sync.WaitGroup) (*SyncProducer, error) { - if config.Debug { - config.Logger.SetLevel(logging.DebugLevel) - } - - config.Logger.Debug("entering NewSyncProducer ...") - if err := config.ValidateSyncProducerConfig(); err != nil { - return nil, err - } - - // set "RequiredAcks" for producer - if config.RequiredAcks == AcksUnset { - config.RequiredAcks = WaitForAll - } - err := setProducerRequiredAcks(config) - if err != nil { - return nil, errors.New("invalid RequiredAcks field in config") - } - - // set partitioner - config.SetPartitioner(partitioner) - - // initProducer object - sp := &SyncProducer{ - Logger: config.Logger, - Config: config, - Partition: config.Partition, - closed: false, - closeChannel: make(chan struct{}), - } - - // If client is nil, create a new one - if sClient == nil { - localClient, err := NewClient(config, partitioner) - if err != nil { - return nil, err - } - // store local client in syncProducer if it was created here - sp.Client = localClient - sClient = localClient - } - - producer, err := sarama.NewSyncProducerFromClient(sClient) - if err != nil { - return nil, err - } - sp.Producer = producer - - // if there is a "waitgroup" arg then use it - if wg != nil { - sp.xwg = wg - sp.xwg.Add(1) - } - - return sp, nil -} - -// Close closes the client and producer -func (ref *SyncProducer) Close() error { - defer func() { - if ref.closed { - ref.Unlock() - return - } - ref.closed = true - close(ref.closeChannel) - - // decrement external waitgroup - if ref.xwg != nil { - ref.xwg.Done() - } - - ref.Unlock() - }() - - ref.Lock() - if ref.closed { - return nil - } - - err := ref.Producer.Close() - if err != nil { - ref.Errorf("SyncProducer close error: %v", err) - return err - } - ref.Debug("SyncProducer closed") - - if ref.Client != nil && !ref.Client.Closed() { - err = ref.Client.Close() - if err != nil { - ref.Errorf("client close error: %v", err) - return err - } - } - - return nil -} - -// SendMsgByte sends a message to Kafka -func (ref *SyncProducer) SendMsgByte(topic string, key []byte, msg []byte) (*ProducerMessage, error) { - // generate a key if none supplied (used by hash partitioner) - ref.WithFields(logging.Fields{"key": key, "msg": msg}).Debug("Sending") - - if key == nil || len(key) == 0 { - md5Sum := fmt.Sprintf("%x", md5.Sum(msg)) - return 
ref.SendMsgToPartition(topic, ref.Partition, sarama.ByteEncoder(md5Sum), sarama.ByteEncoder(msg))
-	}
-	return ref.SendMsgToPartition(topic, ref.Partition, sarama.ByteEncoder(key), sarama.ByteEncoder(msg))
-}
-
-// SendMsgToPartition sends a message to Kafka
-func (ref *SyncProducer) SendMsgToPartition(topic string, partition int32, key sarama.Encoder, msg sarama.Encoder) (*ProducerMessage, error) {
-	if msg == nil {
-		err := errors.New("nil message can not be sent")
-		ref.Error(err)
-		return nil, err
-	}
-	message := &sarama.ProducerMessage{
-		Topic:     topic,
-		Partition: partition,
-		Value:     msg,
-		Key:       key,
-	}
-
-	partition, offset, err := ref.Producer.SendMessage(message)
-	pmsg := &ProducerMessage{
-		Topic:     message.Topic,
-		Key:       message.Key,
-		Value:     message.Value,
-		Metadata:  message.Metadata,
-		Offset:    offset,
-		Partition: partition,
-	}
-	if err != nil {
-		return pmsg, err
-	}
-
-	ref.Debugf("message sent: %s", pmsg)
-	return pmsg, nil
-}
-
-// setProducerRequiredAcks sets the RequiredAcks field for a producer
-func setProducerRequiredAcks(cfg *Config) error {
-	switch cfg.RequiredAcks {
-	case NoResponse:
-		cfg.ProducerConfig().Producer.RequiredAcks = sarama.NoResponse
-		return nil
-	case WaitForLocal:
-		cfg.ProducerConfig().Producer.RequiredAcks = sarama.WaitForLocal
-		return nil
-	case WaitForAll:
-		cfg.ProducerConfig().Producer.RequiredAcks = sarama.WaitForAll
-		return nil
-	default:
-		return errors.New("Invalid RequiredAcks type")
-	}
-}
-
-// IsClosed returns the "closed" status
-func (ref *SyncProducer) IsClosed() bool {
-	ref.Lock()
-	defer ref.Unlock()
-
-	return ref.closed
-}
-
-// WaitForClose returns when the producer is closed
-func (ref *SyncProducer) WaitForClose() {
-	<-ref.closeChannel
-}
-
-// GetCloseChannel returns a channel that is closed on SyncProducer cleanup
-func (ref *SyncProducer) GetCloseChannel() <-chan struct{} {
-	return ref.closeChannel
-}
diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/doc.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/doc.go
deleted file mode 100644
index eb32225950..0000000000
--- a/vendor/github.com/ligato/cn-infra/messaging/kafka/doc.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright (c) 2017 Cisco and/or its affiliates.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package kafka implements a client for the Kafka broker. The client supports
-sending and receiving of messages through the Kafka message bus. It provides
-both sync and async Producers for sending Kafka messages and a Consumer for
-retrieving Kafka messages.
-
-A Producer sends messages to Kafka. A Producer can be either synchronous
-or asynchronous. A request to send a message using a synchronous producer
-blocks until the message is published or an error is returned. A request
-sent using an asynchronous producer returns immediately and the success or
-failure is communicated to the sender through separate status channels.
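
As a concrete sketch of the synchronous path using the SyncProducer deleted above (broker address and topic are placeholders; the nil arguments let the producer create its own sarama client and skip the external wait group):

	cfg := client.NewConfig(logger) // logger is any cn-infra logging.Logger
	cfg.SetBrokers("127.0.0.1:9092")
	producer, err := client.NewSyncProducer(cfg, nil, client.Hash, nil)
	if err != nil {
		return err
	}
	defer producer.Close()
	// a nil key makes SendMsgByte derive an md5 key from the payload
	_, err = producer.SendMsgByte("test-topic", nil, []byte("payload"))
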
-A Consumer receives messages from Kafka for one or more topics. When a
-consumer is initialized, it automatically balances/shares the total number of
-partitions for a message topic over all the active brokers for a topic.
-Message offsets can optionally be committed to Kafka so that when a consumer
-is restarted or a new consumer is initiated it knows where to begin reading
-messages from the Kafka message log.
-
-The package also provides a Multiplexer that allows consumer and producer
-instances to be shared among multiple entities called Connections. Apart from
-reusing the access to kafka brokers, the Multiplexer marks the offset of each
-consumed message as read. Offset marking allows consumption to resume from the
-last committed offset after a restart of the Multiplexer.
-
-Note: Marking an offset does not necessarily commit the offset to the backend
-store immediately. This might result in a corner case where a message might
-be delivered multiple times.
-
-Usage of synchronous producer:
-	// create minimal configuration
-	config := client.NewConfig(logger)
-	config.SetBrokers("ip_addr:port", "ip_addr2:port")
-
-	producer, err := client.NewSyncProducer(config, nil, client.Hash, nil)
-	if err != nil {
-		os.Exit(1)
-	}
-	// key and value are of type []byte
-	producer.SendMsgByte(topic, key, value)
-
-	// key and value are of type Encoder
-	producer.SendMsgToPartition(topic, partition, key, value)
-
-Usage of asynchronous producer:
-	succCh := make(chan *client.ProducerMessage)
-	errCh := make(chan *client.ProducerError)
-
-	// init config
-	config := client.NewConfig(logger)
-	config.SetSendSuccess(true)
-	config.SetSuccessChan(succCh)
-	config.SetSendError(true)
-	config.SetErrorChan(errCh)
-	config.SetBrokers("ip_addr:port", "ip_addr2:port")
-
-	// init producer
-	producer, err := client.NewAsyncProducer(config, nil, client.Hash, nil)
-
-	go func() {
-	eventLoop:
-		for {
-			select {
-			case <-producer.GetCloseChannel():
-				break eventLoop
-			case msg := <-succCh:
-				fmt.Println("message sent successfully - ", msg)
-			case err := <-errCh:
-				fmt.Println("message errored - ", err)
-			}
-		}
-	}()
-
-	producer.SendMsgByte(topic, key, value, meta)
-
-Usage of consumer:
-	config := client.NewConfig(logger)
-	config.SetRecvNotification(true)
-	config.SetRecvNotificationChan(make(chan *cluster.Notification))
-	config.SetRecvError(true)
-	config.SetRecvErrorChan(make(chan error))
-	config.SetRecvMessageChan(make(chan *client.ConsumerMessage))
-	config.SetBrokers("ip_addr:port", "ip_addr2:port2")
-	config.SetTopics("topic1,topic2")
-	config.SetGroup("Group1")
-
-	consumer, err := client.NewConsumer(config, nil)
-	if err != nil {
-		log.Errorf("NewConsumer Error: %v", err)
-		os.Exit(1)
-	}
-
-	go func() {
-		for {
-			select {
-			case notification := <-config.RecvNotificationChan:
-				handleNotification(consumer, notification)
-			case err := <-config.RecvErrorChan:
-				fmt.Printf("Message Recv Errored: %v\n", err)
-			case msg := <-config.RecvMessageChan:
-				messageCallback(consumer, msg, *commit)
-			case <-consumer.GetCloseChannel():
-				return
-			}
-		}
-	}()
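
Offset handling from the note above surfaces on connections later in this patch (bytes_connection.go): received messages are marked and the marks are committed explicitly. A sketch, assuming cn is a connection and msg a *client.ConsumerMessage that was just processed:

	cn.MarkOffset(*msg, "") // mark as read; not necessarily persisted yet
	if err := cn.CommitOffsets(); err != nil {
		logger.Warnf("offset commit failed: %v", err)
	}
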
-
-
-In addition to the basic sync/async producer and consumer, the Multiplexer is
-provided. Its behaviour is depicted below:
-
-
-   +---------------+                   +--------------------+
-   | Connection #1 |------+            |    Multiplexer     |
-   +---------------+      |            |                    |
-                          |            |   sync producer    |
-   +---------------+      |            |   async producer   |       /------------\
-   | Connection #2 |------+----------->|      consumer      |<---->/    Kafka     \
-   +---------------+      |            |                    |      \--------------/
-                          |            |                    |
-   +---------------+      |            |                    |
-   | Connection #3 |------+            +--------------------+
-   +---------------+
-
-To initialize the multiplexer run:
-
-	mx, err := mux.InitMultiplexer(pathToConfig, "name", logger)
-
-The config file specifies the addresses of kafka brokers:
-	addrs:
-	 - "ip_addr1:port"
-	 - "ip_addr2:port"
-
-To create a Connection that reuses the Multiplexer's access to kafka run:
-
-	cn := mx.NewBytesConnection("c1")
-
-	or
-
-	cn := mx.NewProtoConnection("c1", serializer)
-
-Afterwards you can produce messages using the sync API:
-
-	offset, err := cn.SendSyncString("test", "key", "value")
-
-or you can use the async API:
-	succCh := make(chan *client.ProducerMessage, 10)
-	errCh := make(chan *client.ProducerError, 10)
-	cn.SendAsyncString("test", "key", "async message", "meta",
-		mux.ToBytesProducerChan(succCh), mux.ToBytesProducerErrChan(errCh))
-
-	// check if the async send succeeded
-	go func() {
-		select {
-		case success := <-succCh:
-			fmt.Println("Successfully sent async msg", success.Metadata)
-		case err := <-errCh:
-			fmt.Println("Error while sending async msg", err.Err, err.ProducerMessage.Metadata)
-		}
-	}()
-
-Subscribe to consume a topic:
-	consumerChan := make(chan *client.ConsumerMessage)
-	err = cn.ConsumeTopic(mux.ToBytesMsgChan(consumerChan), "test")
-
-	if err == nil {
-		fmt.Println("Consuming test partition")
-		go func() {
-		eventLoop:
-			for {
-				select {
-				case msg := <-consumerChan:
-					fmt.Println(string(msg.Key), string(msg.Value))
-				case <-signalChan:
-					break eventLoop
-				}
-			}
-		}()
-	}
-
-Once all connections have subscribed for topic consumption, run the following
-function to actually initialize the consumer inside the Multiplexer:
-
-	mx.Start()
-
-To properly clean up the Multiplexer call:
-
-	mx.Close()
-
-
-The KAFKA plugin
-
-Once the kafka plugin is initialized:
-	plugin := kafka.Plugin{}
-	// Init called by agent core
-
-The plugin allows connections to be created:
-
-	conn := plugin.NewConnection("name")
-
-or a connection that supports proto-modelled messages:
-
-	protoConn := plugin.NewProtoConnection("protoConnection")
-
-The usage of connections is described above.
-
-*/
-package kafka
diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/kafka.conf b/vendor/github.com/ligato/cn-infra/messaging/kafka/kafka.conf
deleted file mode 100644
index a537415f7e..0000000000
--- a/vendor/github.com/ligato/cn-infra/messaging/kafka/kafka.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# Kafka server addresses
-addrs:
- - "127.0.0.1:9092"
-
-# Name of the consumer's group.
-group_id: name
-
-# Crypto/TLS configuration
-tls:
\ No newline at end of file
diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/README.md b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/README.md
deleted file mode 100644
index b77fff9dfb..0000000000
--- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Multiplexer
-
-The multiplexer instance has access to kafka Brokers. To share the access it allows connections to be created.
-Two connection types are available: one supports messages of type `[]byte`, the other `proto.Message`.
-Both of them allow the creation of several SyncPublishers and AsyncPublishers that implement the `BytesPublisher`
-interface or the `ProtoPublisher` interface respectively.
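
For illustration, the publisher side in code (a sketch; `mx` is an initialized Multiplexer and the topic is a placeholder):

```
conn := mx.NewBytesConnection("c1")
pub, err := conn.NewSyncPublisher("test-topic")
if err == nil {
    err = pub.Put("key", []byte("value"))
}
```
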
The connections also provide API for consuming messages implementing `BytesMessage` -interface or `ProtoMessage` respectively. - - -``` - - +-----------------+ +---------------+ - | | | | - | Kafka brokers | +--------------+ +----| SyncPublisher | - | | | | | | | - +--------^--------+ +---| Connection <-----+ +---------------+ - | | | | - +---------+----------+ | +--------------+ - | Multiplexer | | - | <--+ - | SyncProducer <--+ +--------------+ - | AsyncProducer | | | | - | Consumer | | | Connection <-----+ +----------------+ - | | +---| | | | | - | | +--------------+ +----| AsyncPublisher | - +--------------------+ | | - +----------------+ - -``` \ No newline at end of file diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/bytes_connection.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/bytes_connection.go deleted file mode 100644 index 058da3339b..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/bytes_connection.go +++ /dev/null @@ -1,323 +0,0 @@ -package mux - -import ( - "fmt" - - "github.com/Shopify/sarama" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/messaging/kafka/client" -) - -// BytesConnection is interface for multiplexer with dynamic partitioner. -type BytesConnection interface { - // Creates new synchronous publisher allowing to publish kafka messages - NewSyncPublisher(topic string) (BytesPublisher, error) - // Creates new asynchronous publisher allowing to publish kafka messages - NewAsyncPublisher(topic string, successClb func(*client.ProducerMessage), errorClb func(err *client.ProducerError)) (BytesPublisher, error) -} - -// BytesManualConnection is interface for multiplexer with manual partitioner. -type BytesManualConnection interface { - // Creates new synchronous publisher allowing to publish kafka messages to chosen partition - NewSyncPublisherToPartition(topic string, partition int32) (BytesPublisher, error) - // Creates new asynchronous publisher allowing to publish kafka messages to chosen partition - NewAsyncPublisherToPartition(topic string, partition int32, successClb func(*client.ProducerMessage), errorClb func(err *client.ProducerError)) (BytesPublisher, error) -} - -// BytesConnectionStr represents connection built on hash-mode multiplexer -type BytesConnectionStr struct { - BytesConnectionFields -} - -// BytesManualConnectionStr represents connection built on manual-mode multiplexer -type BytesManualConnectionStr struct { - BytesConnectionFields -} - -// BytesConnectionFields is an entity that provides access to shared producers/consumers of multiplexer -type BytesConnectionFields struct { - // multiplexer is used for access to kafka brokers - multiplexer *Multiplexer - // name identifies the connection - name string -} - -// BytesPublisher allows to publish a message of type []bytes into messaging system. 
-type BytesPublisher interface { - Put(key string, data []byte) error -} - -type bytesSyncPublisherKafka struct { - conn *BytesConnectionStr - topic string -} - -type bytesAsyncPublisherKafka struct { - conn *BytesConnectionStr - topic string - succCallback func(*client.ProducerMessage) - errCallback func(*client.ProducerError) -} - -type bytesManualSyncPublisherKafka struct { - conn *BytesManualConnectionStr - topic string - partition int32 -} - -type bytesManualAsyncPublisherKafka struct { - conn *BytesManualConnectionStr - topic string - partition int32 - succCallback func(*client.ProducerMessage) - errCallback func(*client.ProducerError) -} - -// NewSyncPublisher creates a new instance of bytesSyncPublisherKafka that allows to publish sync kafka messages using common messaging API -func (conn *BytesConnectionStr) NewSyncPublisher(topic string) (BytesPublisher, error) { - return &bytesSyncPublisherKafka{conn, topic}, nil -} - -// NewAsyncPublisher creates a new instance of bytesAsyncPublisherKafka that allows to publish async kafka messages using common messaging API -func (conn *BytesConnectionStr) NewAsyncPublisher(topic string, successClb func(*client.ProducerMessage), errorClb func(err *client.ProducerError)) (BytesPublisher, error) { - return &bytesAsyncPublisherKafka{conn, topic, successClb, errorClb}, nil -} - -// NewSyncPublisherToPartition creates a new instance of bytesSyncPublisherKafka that allows to publish sync kafka messages using common messaging API -func (conn *BytesManualConnectionStr) NewSyncPublisherToPartition(topic string, partition int32) (BytesPublisher, error) { - return &bytesManualSyncPublisherKafka{conn, topic, partition}, nil -} - -// NewAsyncPublisherToPartition creates a new instance of bytesAsyncPublisherKafka that allows to publish async kafka messages using common messaging API -func (conn *BytesManualConnectionStr) NewAsyncPublisherToPartition(topic string, partition int32, successClb func(*client.ProducerMessage), errorClb func(err *client.ProducerError)) (BytesPublisher, error) { - return &bytesManualAsyncPublisherKafka{conn, topic, partition, successClb, errorClb}, nil -} - -// ConsumeTopic is called to start consuming of a topic. -// Function can be called until the multiplexer is started, it returns an error otherwise. -// The provided channel should be buffered, otherwise messages might be lost. 
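
In practice the callback-based subscription below was often bridged back to a channel with the ToBytesMsgChan adapter from chan.go (also part of this deletion); a sketch, assuming conn was created from the multiplexer and Start has not been called yet:

	msgCh := make(chan *client.ConsumerMessage, 100) // buffered, per the comment above
	if err := conn.ConsumeTopic(ToBytesMsgChan(msgCh), "events"); err != nil {
		return err
	}
	go func() {
		for msg := range msgCh {
			fmt.Println(string(msg.Key), string(msg.Value))
		}
	}()
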
-func (conn *BytesConnectionStr) ConsumeTopic(msgClb func(message *client.ConsumerMessage), topics ...string) error { - conn.multiplexer.rwlock.Lock() - defer conn.multiplexer.rwlock.Unlock() - - if conn.multiplexer.started { - return fmt.Errorf("ConsumeTopic can be called only if the multiplexer has not been started yet") - } - - for _, topic := range topics { - // check if we have already consumed the topic - var found bool - var subs *consumerSubscription - LoopSubs: - for _, subscription := range conn.multiplexer.mapping { - if subscription.manual == true { - // do not mix dynamic and manual mode - continue - } - if subscription.topic == topic { - found = true - subs = subscription - break LoopSubs - } - } - - if !found { - subs = &consumerSubscription{ - manual: false, // non-manual example - topic: topic, - connectionName: conn.name, - byteConsMsg: msgClb, - } - // subscribe new topic - conn.multiplexer.mapping = append(conn.multiplexer.mapping, subs) - } - - // add subscription to consumerList - subs.byteConsMsg = msgClb - } - - return nil -} - -// ConsumePartition is called to start consuming given topic on partition with offset -// Function can be called until the multiplexer is started, it returns an error otherwise. -// The provided channel should be buffered, otherwise messages might be lost. -func (conn *BytesManualConnectionStr) ConsumePartition(msgClb func(message *client.ConsumerMessage), topic string, partition int32, offset int64) error { - conn.multiplexer.rwlock.Lock() - defer conn.multiplexer.rwlock.Unlock() - var err error - - // check if we have already consumed the topic on partition and offset - var found bool - var subs *consumerSubscription - - for _, subscription := range conn.multiplexer.mapping { - if subscription.manual == false { - // do not mix dynamic and manual mode - continue - } - if subscription.topic == topic && subscription.partition == partition && subscription.offset == offset { - found = true - subs = subscription - break - } - } - - if !found { - subs = &consumerSubscription{ - manual: true, // manual example - topic: topic, - partition: partition, - offset: offset, - connectionName: conn.name, - byteConsMsg: msgClb, - } - // subscribe new topic on partition - conn.multiplexer.mapping = append(conn.multiplexer.mapping, subs) - } - - // add subscription to consumerList - subs.byteConsMsg = msgClb - - if conn.multiplexer.started { - conn.multiplexer.Infof("Starting 'post-init' manual Consumer") - subs.partitionConsumer, err = conn.StartPostInitConsumer(topic, partition, offset) - if err != nil { - return err - } - if subs.partitionConsumer == nil { - return nil - } - } - - return nil -} - -// StartPostInitConsumer allows to start a new partition consumer after mux is initialized -func (conn *BytesManualConnectionStr) StartPostInitConsumer(topic string, partition int32, offset int64) (*sarama.PartitionConsumer, error) { - multiplexer := conn.multiplexer - multiplexer.WithFields(logging.Fields{"topic": topic}).Debugf("Post-init consuming started") - - if multiplexer.Consumer == nil || multiplexer.Consumer.SConsumer == nil { - multiplexer.Warn("Unable to start post-init Consumer, client not available in the mux") - return nil, nil - } - - // Consumer that reads topic/partition/offset. 
Throws error if offset is 'in the future' (message with offset does not exist yet) - partitionConsumer, err := multiplexer.Consumer.SConsumer.ConsumePartition(topic, partition, offset) - if err != nil { - return nil, err - } - multiplexer.Consumer.StartConsumerManualHandlers(partitionConsumer) - - return &partitionConsumer, nil -} - -// StopConsuming cancels the previously created subscription for consuming the topic. -func (conn *BytesConnectionStr) StopConsuming(topic string) error { - return conn.multiplexer.stopConsuming(topic, conn.name) -} - -// StopConsumingPartition cancels the previously created subscription for consuming the topic, partition and offset -func (conn *BytesManualConnectionStr) StopConsumingPartition(topic string, partition int32, offset int64) error { - return conn.multiplexer.stopConsumingPartition(topic, partition, offset, conn.name) -} - -//SendSyncMessage sends a message using the sync API and default partitioner -func (conn *BytesConnectionStr) SendSyncMessage(topic string, key client.Encoder, value client.Encoder) (offset int64, err error) { - msg, err := conn.multiplexer.hashSyncProducer.SendMsgToPartition(topic, DefPartition, key, value) - if err != nil { - return 0, err - } - return msg.Offset, err -} - -// SendAsyncMessage sends a message using the async API and default partitioner -func (conn *BytesConnectionStr) SendAsyncMessage(topic string, key client.Encoder, value client.Encoder, meta interface{}, successClb func(*client.ProducerMessage), errClb func(*client.ProducerError)) { - auxMeta := &asyncMeta{successClb: successClb, errorClb: errClb, usersMeta: meta} - conn.multiplexer.hashAsyncProducer.SendMsgToPartition(topic, DefPartition, key, value, auxMeta) -} - -//SendSyncMessageToPartition sends a message using the sync API and default partitioner -func (conn *BytesManualConnectionStr) SendSyncMessageToPartition(topic string, partition int32, key client.Encoder, value client.Encoder) (offset int64, err error) { - msg, err := conn.multiplexer.manSyncProducer.SendMsgToPartition(topic, partition, key, value) - if err != nil { - return 0, err - } - return msg.Offset, err -} - -// SendAsyncMessageToPartition sends a message using the async API and default partitioner -func (conn *BytesManualConnectionStr) SendAsyncMessageToPartition(topic string, partition int32, key client.Encoder, value client.Encoder, meta interface{}, successClb func(*client.ProducerMessage), errClb func(*client.ProducerError)) { - auxMeta := &asyncMeta{successClb: successClb, errorClb: errClb, usersMeta: meta} - conn.multiplexer.manAsyncProducer.SendMsgToPartition(topic, partition, key, value, auxMeta) -} - -// SendSyncByte sends a message that uses byte encoder using the sync API -func (conn *BytesConnectionStr) SendSyncByte(topic string, key []byte, value []byte) (offset int64, err error) { - return conn.SendSyncMessage(topic, sarama.ByteEncoder(key), sarama.ByteEncoder(value)) -} - -// SendSyncString sends a message that uses string encoder using the sync API -func (conn *BytesConnectionStr) SendSyncString(topic string, key string, value string) (offset int64, err error) { - return conn.SendSyncMessage(topic, sarama.StringEncoder(key), sarama.StringEncoder(value)) -} - -// SendSyncStringToPartition sends a message that uses string encoder using the sync API to custom partition -func (conn *BytesManualConnectionStr) SendSyncStringToPartition(topic string, partition int32, key string, value string) (offset int64, err error) { - return conn.SendSyncMessageToPartition(topic, partition, 
sarama.StringEncoder(key), sarama.StringEncoder(value)) -} - -// SendAsyncByte sends a message that uses byte encoder using the async API -func (conn *BytesConnectionStr) SendAsyncByte(topic string, key []byte, value []byte, meta interface{}, successClb func(*client.ProducerMessage), errClb func(*client.ProducerError)) { - conn.SendAsyncMessage(topic, sarama.ByteEncoder(key), sarama.ByteEncoder(value), meta, successClb, errClb) -} - -// SendAsyncString sends a message that uses string encoder using the async API -func (conn *BytesConnectionStr) SendAsyncString(topic string, key string, value string, meta interface{}, successClb func(*client.ProducerMessage), errClb func(*client.ProducerError)) { - conn.SendAsyncMessage(topic, sarama.StringEncoder(key), sarama.StringEncoder(value), meta, successClb, errClb) -} - -// SendAsyncStringToPartition sends a message that uses string encoder using the async API to custom partition -func (conn *BytesManualConnectionStr) SendAsyncStringToPartition(topic string, partition int32, key string, value string, meta interface{}, successClb func(*client.ProducerMessage), errClb func(*client.ProducerError)) { - conn.SendAsyncMessageToPartition(topic, partition, sarama.StringEncoder(key), sarama.StringEncoder(value), meta, successClb, errClb) -} - -// Put publishes a message into kafka -func (p *bytesSyncPublisherKafka) Put(key string, data []byte) error { - _, err := p.conn.SendSyncMessage(p.topic, sarama.StringEncoder(key), sarama.ByteEncoder(data)) - return err -} - -// Put publishes a message into kafka -func (p *bytesAsyncPublisherKafka) Put(key string, data []byte) error { - p.conn.SendAsyncMessage(p.topic, sarama.StringEncoder(key), sarama.ByteEncoder(data), nil, p.succCallback, p.errCallback) - return nil -} - -// Put publishes a message into kafka -func (p *bytesManualSyncPublisherKafka) Put(key string, data []byte) error { - _, err := p.conn.SendSyncMessageToPartition(p.topic, p.partition, sarama.StringEncoder(key), sarama.ByteEncoder(data)) - return err -} - -// Put publishes a message into kafka -func (p *bytesManualAsyncPublisherKafka) Put(key string, data []byte) error { - p.conn.SendAsyncMessageToPartition(p.topic, p.partition, sarama.StringEncoder(key), sarama.ByteEncoder(data), nil, p.succCallback, p.errCallback) - return nil -} - -// MarkOffset marks the specified message as read -func (conn *BytesConnectionFields) MarkOffset(msg client.ConsumerMessage, metadata string) { - if conn.multiplexer != nil && conn.multiplexer.Consumer != nil { - conn.multiplexer.Consumer.MarkOffset(&msg, metadata) - } -} - -// CommitOffsets manually commits message offsets -func (conn *BytesConnectionFields) CommitOffsets() error { - if conn.multiplexer != nil && conn.multiplexer.Consumer != nil { - return conn.multiplexer.Consumer.CommitOffsets() - } - return fmt.Errorf("cannot commit offsets, consumer not available") -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/chan.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/chan.go deleted file mode 100644 index 26d0af1bff..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/chan.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mux - -import ( - "github.com/ligato/cn-infra/messaging" - "github.com/ligato/cn-infra/messaging/kafka/client" - "time" -) - -// ToBytesMsgChan allows to receive ConsumerMessage through channel. This function can be used as an argument for -// ConsumeTopic call. -func ToBytesMsgChan(ch chan *client.ConsumerMessage, opts ...interface{}) func(*client.ConsumerMessage) { - - timeout, logger := messaging.ParseOpts(opts...) - - return func(msg *client.ConsumerMessage) { - select { - case ch <- msg: - case <-time.After(timeout): - logger.Warn("Unable to deliver message") - } - } -} - -// ToBytesProducerChan allows to receive ProducerMessage through channel. This function can be used as an argument for -// methods publishing using async API. -func ToBytesProducerChan(ch chan *client.ProducerMessage, opts ...interface{}) func(*client.ProducerMessage) { - - timeout, logger := messaging.ParseOpts(opts...) - - return func(msg *client.ProducerMessage) { - select { - case ch <- msg: - case <-time.After(timeout): - logger.Warn("Unable to deliver message") - } - } -} - -// ToBytesProducerErrChan allows to receive ProducerMessage through channel. This function can be used as an argument for -// methods publishing using async API. -func ToBytesProducerErrChan(ch chan *client.ProducerError, opts ...interface{}) func(*client.ProducerError) { - - timeout, logger := messaging.ParseOpts(opts...) - - return func(msg *client.ProducerError) { - select { - case ch <- msg: - case <-time.After(timeout): - logger.Warn("Unable to deliver message") - } - } -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/config.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/config.go deleted file mode 100644 index 26b3fa2b7f..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/config.go +++ /dev/null @@ -1,153 +0,0 @@ -package mux - -import ( - "github.com/Shopify/sarama" - "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/messaging/kafka/client" - "github.com/ligato/cn-infra/utils/clienttls" - "time" -) - -const ( - // DefAddress default kafka address/port (if not specified in config) - DefAddress = "127.0.0.1:9092" - // DefPartition is used if no specific partition is set - DefPartition = 0 - // OffsetNewest is head offset which will be assigned to the new message produced to the partition - OffsetNewest = sarama.OffsetNewest - // OffsetOldest is oldest offset available on the partition - OffsetOldest = sarama.OffsetOldest -) - -// Config holds the settings for kafka multiplexer. -type Config struct { - Addrs []string `json:"addrs"` - GroupID string `json:"group_id"` - TLS clienttls.TLS `json:"tls"` -} - -// ConsumerFactory produces a consumer for the selected topics in a specified consumer group. -// The reason why a function(factory) is passed to Multiplexer instead of consumer instance is -// that list of topics to be consumed has to be known on consumer initialization. -// Multiplexer calls the function once the list of topics to be consumed is selected. 
-type ConsumerFactory func(topics []string, groupId string) (*client.Consumer, error) - -// ConfigFromFile loads the Kafka multiplexer configuration from the -// specified file. If the specified file is valid and contains -// valid configuration, the parsed configuration is -// returned; otherwise, an error is returned. -func ConfigFromFile(fpath string) (*Config, error) { - cfg := &Config{} - err := config.ParseConfigFromYamlFile(fpath, cfg) - if err != nil { - return nil, err - } - return cfg, err -} - -func getConsumerFactory(config *client.Config) ConsumerFactory { - return func(topics []string, groupId string) (*client.Consumer, error) { - config.SetRecvMessageChan(make(chan *client.ConsumerMessage)) - config.Topics = topics - config.GroupID = groupId - config.SetInitialOffset(sarama.OffsetOldest) - - // create new consumer and start message handlers - return client.NewConsumer(config, nil) - } -} - -// InitMultiplexer initialize and returns new kafka multiplexer based on the supplied config file. -// Name is used as groupId identification of consumer. Kafka allows to store last read offset for -// a groupId. This is leveraged to deliver unread messages after restart. -func InitMultiplexer(configFile string, name string, log logging.Logger) (*Multiplexer, error) { - var err error - var tls clienttls.TLS - cfg := &Config{[]string{DefAddress}, "", tls} - if configFile != "" { - cfg, err = ConfigFromFile(configFile) - if err != nil { - return nil, err - } - } - - // prepare client config - clientCfg := client.NewConfig(log) - clientCfg.SetSendSuccess(true) - clientCfg.SetSuccessChan(make(chan *client.ProducerMessage)) - clientCfg.SetSendError(true) - clientCfg.SetErrorChan(make(chan *client.ProducerError)) - clientCfg.SetBrokers(cfg.Addrs...) - if cfg.TLS.Enabled { - tlsConfig, err := clienttls.CreateTLSConfig(cfg.TLS) - if err != nil { - return nil, err - } - clientCfg.SetTLS(tlsConfig) - } - - // create hash client - sClientHash, err := client.NewClient(clientCfg, client.Hash) - if err != nil { - return nil, err - } - - // create manual client - sClientManual, err := client.NewClient(clientCfg, client.Manual) - if err != nil { - return nil, err - } - - // todo client is currently set always as hash - return InitMultiplexerWithConfig(clientCfg, sClientHash, sClientManual, name, log) -} - -// InitMultiplexerWithConfig initialize and returns new kafka multiplexer based on the supplied mux configuration. -// Name is used as groupId identification of consumer. Kafka allows to store last read offset for a groupId. -// This is leveraged to deliver unread messages after restart. 
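
Bootstrapping from the kafka.conf shown earlier in this patch then looks roughly like this (path and group name are placeholders):

	mx, err := InitMultiplexer("/etc/vpp-agent/kafka.conf", "vpp-agent", logrus.DefaultLogger())
	if err != nil {
		return err
	}
	defer mx.Close()
	// subscribe connections here, then start dispatching:
	if err := mx.Start(); err != nil {
		return err
	}
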
-func InitMultiplexerWithConfig(clientCfg *client.Config, hsClient sarama.Client, manClient sarama.Client, name string, log logging.Logger) (*Multiplexer, error) { - const errorFmt = "Failed to create Kafka %s, Configured broker(s) %v, Error: '%s'" - - log.WithField("addrs", hsClient.Brokers()).Debug("Kafka connecting") - - startTime := time.Now() - producers := multiplexerProducers{} - - // Prepare sync/async producer - if hsClient != nil { - hashSyncProducer, err := client.NewSyncProducer(clientCfg, hsClient, client.Hash, nil) - if err != nil { - log.Errorf(errorFmt, "SyncProducer (hash)", clientCfg.Brokers, err) - return nil, err - } - hashAsyncProducer, err := client.NewAsyncProducer(clientCfg, hsClient, client.Hash, nil) - if err != nil { - log.Errorf(errorFmt, "AsyncProducer", clientCfg.Brokers, err) - return nil, err - } - producers.hashSyncProducer = hashSyncProducer - producers.hashAsyncProducer = hashAsyncProducer - } - // Prepare manual sync/async producer - if manClient != nil { - manualSyncProducer, err := client.NewSyncProducer(clientCfg, manClient, client.Manual, nil) - if err != nil { - log.Errorf(errorFmt, "SyncProducer (manual)", clientCfg.Brokers, err) - return nil, err - } - - manualAsyncProducer, err := client.NewAsyncProducer(clientCfg, manClient, client.Manual, nil) - if err != nil { - log.Errorf(errorFmt, "AsyncProducer", clientCfg.Brokers, err) - return nil, err - } - producers.manSyncProducer = manualSyncProducer - producers.manAsyncProducer = manualAsyncProducer - } - - kafkaConnect := time.Since(startTime) - log.WithField("durationInNs", kafkaConnect.Nanoseconds()).Info("Connecting to kafka took ", kafkaConnect) - - return NewMultiplexer(getConsumerFactory(clientCfg), producers, clientCfg, name, log), nil -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/doc.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/doc.go deleted file mode 100644 index c129dc39aa..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mux implements the session multiplexer that allows multiple -// plugins to share a single connection to a Kafka broker. -package mux diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/mock.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/mock.go deleted file mode 100644 index a674e7ae48..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/mock.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mux - -import ( - "testing" - - "github.com/Shopify/sarama/mocks" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/messaging/kafka/client" -) - -func getMockConsumerFactory(t *testing.T) ConsumerFactory { - return func(topics []string, name string) (*client.Consumer, error) { - return client.GetConsumerMock(t), nil - } -} - -// Mock returns mock of Multiplexer that can be used for testing purposes. -func Mock(t *testing.T) *KafkaMock { - asyncP, aMock := client.GetAsyncProducerMock(t) - syncP, sMock := client.GetSyncProducerMock(t) - producers := multiplexerProducers{ - syncP, syncP, asyncP, asyncP, - } - - return &KafkaMock{ - NewMultiplexer(getMockConsumerFactory(t), producers, &client.Config{}, "name", logrus.DefaultLogger()), - aMock, sMock} -} - -// KafkaMock for the tests -type KafkaMock struct { - Mux *Multiplexer - AsyncPub *mocks.AsyncProducer - SyncPub *mocks.SyncProducer -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/multiplexer.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/multiplexer.go deleted file mode 100644 index 2a65ca3798..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/multiplexer.go +++ /dev/null @@ -1,372 +0,0 @@ -package mux - -import ( - "fmt" - "sync" - - "github.com/Shopify/sarama" - - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/messaging/kafka/client" - "github.com/ligato/cn-infra/utils/safeclose" -) - -// Multiplexer encapsulates clients to kafka cluster (SyncProducer, AsyncProducer (both of them -// with 'hash' and 'manual' partitioner), consumer). It allows to create multiple Connections -// that use multiplexer's clients for communication with kafka cluster. The aim of Multiplexer -// is to decrease the number of connections needed. The set of topics to be consumed by -// Connections needs to be selected before the underlying consumer in Multiplexer is started. -// Once the Multiplexer's consumer has been started new topics can not be added. -type Multiplexer struct { - logging.Logger - - // consumer used by the Multiplexer (bsm/sarama cluster) - Consumer *client.Consumer - - // producers available for this mux - multiplexerProducers - - // client config - config *client.Config - - // name is used for identification of stored last consumed offset in kafka. This allows - // to follow up messages after restart. - name string - - // guards access to mapping and started flag - rwlock sync.RWMutex - - // started denotes whether the multiplexer is dispatching the messages or accepting subscriptions to - // consume a topic. Once the multiplexer is started, new subscription can not be added. - started bool - - // Mapping provides the mapping of subscribed consumers. 
Subscription contains topic, partition and offset to consume, - // as well as dynamic/manual mode flag - mapping []*consumerSubscription - - // factory that crates Consumer used in the Multiplexer - consumerFactory func(topics []string, groupId string) (*client.Consumer, error) -} - -// ConsumerSubscription contains all information about subscribed kafka consumer/watcher -type consumerSubscription struct { - // in manual mode, multiplexer is distributing messages according to topic, partition and offset. If manual - // mode is off, messages are distributed using topic only - manual bool - // topic to watch on - topic string - // partition to watch on in manual mode - partition int32 - // partition consumer created only in manual mode. Its value is stored in subscription (after all required handlers - // are started) in order to be properly closed if required - partitionConsumer *sarama.PartitionConsumer - // offset to watch on in manual mode - offset int64 - // name identifies the connection - connectionName string - // sends message to subscribed channel - byteConsMsg func(*client.ConsumerMessage) -} - -// asyncMeta is auxiliary structure used by Multiplexer to distribute consumer messages -type asyncMeta struct { - successClb func(*client.ProducerMessage) - errorClb func(error *client.ProducerError) - usersMeta interface{} -} - -// multiplexerProducers groups all mux producers -type multiplexerProducers struct { - // hashSyncProducer with hash partitioner used by the Multiplexer - hashSyncProducer *client.SyncProducer - // manSyncProducer with manual partitioner used by the Multiplexer - manSyncProducer *client.SyncProducer - // hashAsyncProducer with hash used by the Multiplexer - hashAsyncProducer *client.AsyncProducer - // manAsyncProducer with manual used by the Multiplexer - manAsyncProducer *client.AsyncProducer -} - -// NewMultiplexer creates new instance of Kafka Multiplexer -func NewMultiplexer(consumerFactory ConsumerFactory, producers multiplexerProducers, clientCfg *client.Config, - name string, log logging.Logger) *Multiplexer { - if clientCfg.Logger == nil { - clientCfg.Logger = log - } - cl := &Multiplexer{consumerFactory: consumerFactory, - Logger: log, - name: name, - mapping: []*consumerSubscription{}, - multiplexerProducers: producers, - config: clientCfg, - } - - go cl.watchAsyncProducerChannels() - if producers.manAsyncProducer != nil && producers.manAsyncProducer.Config != nil { - go cl.watchManualAsyncProducerChannels() - } - return cl -} - -func (mux *Multiplexer) watchAsyncProducerChannels() { - for { - select { - case err := <-mux.hashAsyncProducer.Config.ErrorChan: - mux.Println("AsyncProducer (hash): failed to produce message", err.Err) - errMsg := err.ProducerMessage - - if errMeta, ok := errMsg.Metadata.(*asyncMeta); ok && errMeta.errorClb != nil { - err.ProducerMessage.Metadata = errMeta.usersMeta - errMeta.errorClb(err) - } - case success := <-mux.hashAsyncProducer.Config.SuccessChan: - - if succMeta, ok := success.Metadata.(*asyncMeta); ok && succMeta.successClb != nil { - success.Metadata = succMeta.usersMeta - succMeta.successClb(success) - } - case <-mux.hashAsyncProducer.GetCloseChannel(): - mux.Debug("AsyncProducer (hash): closing watch loop") - } - } -} - -func (mux *Multiplexer) watchManualAsyncProducerChannels() { - for { - select { - case err := <-mux.manAsyncProducer.Config.ErrorChan: - mux.Println("AsyncProducer (manual): failed to produce message", err.Err) - errMsg := err.ProducerMessage - - if errMeta, ok := errMsg.Metadata.(*asyncMeta); ok 
&& errMeta.errorClb != nil { - err.ProducerMessage.Metadata = errMeta.usersMeta - errMeta.errorClb(err) - } - case success := <-mux.manAsyncProducer.Config.SuccessChan: - - if succMeta, ok := success.Metadata.(*asyncMeta); ok && succMeta.successClb != nil { - success.Metadata = succMeta.usersMeta - succMeta.successClb(success) - } - case <-mux.manAsyncProducer.GetCloseChannel(): - mux.Debug("AsyncProducer (manual): closing watch loop") - } - - } -} - -// Start should be called once all the Connections have been subscribed -// for topic consumption. An attempt to start consuming a topic after the multiplexer is started -// returns an error. -func (mux *Multiplexer) Start() error { - mux.rwlock.Lock() - defer mux.rwlock.Unlock() - var err error - - if mux.started { - return fmt.Errorf("multiplexer has been started already") - } - - // block further Consumer consumers - mux.started = true - - var hashTopics, manTopics []string - - for _, subscription := range mux.mapping { - if subscription.manual { - manTopics = append(manTopics, subscription.topic) - continue - } - hashTopics = append(hashTopics, subscription.topic) - } - - mux.config.SetRecvMessageChan(make(chan *client.ConsumerMessage)) - mux.config.GroupID = mux.name - mux.config.SetInitialOffset(sarama.OffsetOldest) - mux.config.Topics = append(hashTopics, manTopics...) - - // create consumer - mux.WithFields(logging.Fields{"hashTopics": hashTopics, "manualTopics": manTopics}).Debugf("Consuming started") - mux.Consumer, err = client.NewConsumer(mux.config, nil) - if err != nil { - return err - } - - if len(hashTopics) == 0 { - mux.Debug("No topics for hash partitioner") - } else { - mux.WithFields(logging.Fields{"topics": hashTopics}).Debugf("Consuming (hash) started") - mux.Consumer.StartConsumerHandlers() - } - - if len(manTopics) == 0 { - mux.Debug("No topics for manual partitioner") - } else { - mux.WithFields(logging.Fields{"topics": manTopics}).Debugf("Consuming (manual) started") - for _, sub := range mux.mapping { - if sub.manual { - sConsumer := mux.Consumer.SConsumer - if sConsumer == nil { - return fmt.Errorf("consumer for manual partition is not available") - } - partitionConsumer, err := sConsumer.ConsumePartition(sub.topic, sub.partition, sub.offset) - if err != nil { - return err - } - // Store partition consumer in subscription so it can be closed lately - sub.partitionConsumer = &partitionConsumer - mux.Logger.WithFields(logging.Fields{"topic": sub.topic, "partition": sub.partition, "offset": sub.offset}).Info("Partition sConsumer started") - mux.Consumer.StartConsumerManualHandlers(partitionConsumer) - } - } - - } - - go mux.genericConsumer() - go mux.manualConsumer(mux.Consumer) - - return err -} - -// Close cleans up the resources used by the Multiplexer -func (mux *Multiplexer) Close() { - safeclose.Close( - mux.Consumer, - mux.hashSyncProducer, - mux.hashAsyncProducer, - mux.manSyncProducer, - mux.manAsyncProducer) -} - -// NewBytesConnection creates instance of the BytesConnectionStr that provides access to shared -// Multiplexer's clients with hash partitioner. -func (mux *Multiplexer) NewBytesConnection(name string) *BytesConnectionStr { - return &BytesConnectionStr{BytesConnectionFields{multiplexer: mux, name: name}} -} - -// NewBytesManualConnection creates instance of the BytesManualConnectionStr that provides access to shared -// Multiplexer's clients with manual partitioner. 
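
Together with the Mock constructor from mock.go above, these connection constructors allow broker-less tests; a sketch (topic and payload are placeholders; the expectation call is sarama's mock API):

	func TestSyncPublish(t *testing.T) {
		km := Mock(t)
		km.SyncPub.ExpectSendMessageAndSucceed()
		conn := km.Mux.NewBytesConnection("test")
		if _, err := conn.SendSyncByte("test-topic", []byte("key"), []byte("value")); err != nil {
			t.Fatal(err)
		}
	}
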
-func (mux *Multiplexer) NewBytesManualConnection(name string) *BytesManualConnectionStr { - return &BytesManualConnectionStr{BytesConnectionFields{multiplexer: mux, name: name}} -} - -// NewProtoConnection creates instance of the ProtoConnection that provides access to shared -// Multiplexer's clients with hash partitioner. -func (mux *Multiplexer) NewProtoConnection(name string, serializer keyval.Serializer) *ProtoConnection { - return &ProtoConnection{ProtoConnectionFields{multiplexer: mux, serializer: serializer, name: name}} -} - -// NewProtoManualConnection creates instance of the ProtoConnectionFields that provides access to shared -// Multiplexer's clients with manual partitioner. -func (mux *Multiplexer) NewProtoManualConnection(name string, serializer keyval.Serializer) *ProtoManualConnection { - return &ProtoManualConnection{ProtoConnectionFields{multiplexer: mux, serializer: serializer, name: name}} -} - -// Propagates incoming messages to respective channels. -func (mux *Multiplexer) propagateMessage(msg *client.ConsumerMessage) { - mux.rwlock.RLock() - defer mux.rwlock.RUnlock() - - if msg == nil { - return - } - - // Find subscribed topics. Note: topic can be subscribed for both dynamic and manual consuming - for _, subscription := range mux.mapping { - if msg.Topic == subscription.topic { - // Clustered mode - message is consumed only on right partition and offset - if subscription.manual { - if msg.Partition == subscription.partition && msg.Offset >= subscription.offset { - mux.Debug("offset ", msg.Offset, string(msg.Value), string(msg.Key), msg.Partition) - subscription.byteConsMsg(msg) - } - } else { - // Non-manual mode - // if we are not able to write into the channel we should skip the receiver - // and report an error to avoid deadlock - mux.Debug("offset ", msg.Offset, string(msg.Value), string(msg.Key), msg.Partition) - subscription.byteConsMsg(msg) - } - } - } -} - -// genericConsumer handles incoming messages to the multiplexer and distributes them among the subscribers. -func (mux *Multiplexer) genericConsumer() { - mux.Debug("Generic Consumer started") - for { - select { - case <-mux.Consumer.GetCloseChannel(): - mux.Debug("Closing Consumer") - return - case msg := <-mux.Consumer.Config.RecvMessageChan: - // 'hash' partitioner messages will be marked - mux.propagateMessage(msg) - case err := <-mux.Consumer.Config.RecvErrorChan: - mux.Error("Received partitionConsumer error ", err) - } - } -} - -// manualConsumer takes a consumer (even a post-init created) and handles incoming messages for them. -func (mux *Multiplexer) manualConsumer(consumer *client.Consumer) { - mux.Debug("Generic Consumer started") - for { - select { - case <-consumer.GetCloseChannel(): - mux.Debug("Closing Consumer") - return - case msg := <-consumer.Config.RecvMessageChan: - mux.Debug("Kafka message received") - // 'later-stage' Consumer does not consume 'hash' messages, none of them is marked - mux.propagateMessage(msg) - case err := <-consumer.Config.RecvErrorChan: - mux.Error("Received partitionConsumer error ", err) - } - } -} - -// Remove consumer subscription on given topic. If there is no such a subscription, return error. 
-func (mux *Multiplexer) stopConsuming(topic string, name string) error { - mux.rwlock.Lock() - defer mux.rwlock.Unlock() - - var wasError error - var topicFound bool - for index, subs := range mux.mapping { - if !subs.manual && subs.topic == topic && subs.connectionName == name { - topicFound = true - mux.mapping = append(mux.mapping[:index], mux.mapping[index+1:]...) - } - } - if !topicFound { - wasError = fmt.Errorf("topic %s was not consumed by '%s'", topic, name) - } - return wasError -} - -// Remove consumer subscription on given topic, partition and initial offset. If there is no such a subscription -// (all fields must match), return error. -func (mux *Multiplexer) stopConsumingPartition(topic string, partition int32, offset int64, name string) error { - mux.rwlock.Lock() - defer mux.rwlock.Unlock() - - var wasError error - var topicFound bool - // Remove consumer from subscription - for index, subs := range mux.mapping { - if subs.manual && subs.topic == topic && subs.partition == partition && subs.offset == offset && subs.connectionName == name { - topicFound = true - mux.mapping = append(mux.mapping[:index], mux.mapping[index+1:]...) - } - // Close the partition consumer related to the subscription - safeclose.Close(subs.partitionConsumer) - } - if !topicFound { - wasError = fmt.Errorf("topic %s, partition %v and offset %v was not consumed by '%s'", - topic, partition, offset, name) - } - // Stop partition consumer - return wasError -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/proto_connection.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/proto_connection.go deleted file mode 100644 index 8b9c070ba9..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/mux/proto_connection.go +++ /dev/null @@ -1,364 +0,0 @@ -package mux - -import ( - "fmt" - - "github.com/Shopify/sarama" - "github.com/golang/protobuf/proto" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/messaging" - "github.com/ligato/cn-infra/messaging/kafka/client" -) - -// Connection is interface for multiplexer with dynamic partitioner. -type Connection interface { - messaging.ProtoWatcher - // Creates new synchronous publisher allowing to publish kafka messages - NewSyncPublisher(topic string) (messaging.ProtoPublisher, error) - // Creates new asynchronous publisher allowing to publish kafka messages - NewAsyncPublisher(topic string, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) -} - -// ManualConnection is interface for multiplexer with manual partitioner. 
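The two unsubscribe paths above differ only in how strict the match is: the dynamic variant matches on topic and connection name, while the manual variant requires topic, partition, offset and connection name to all agree. A sketch of the manual-mode removal, with a simplified subscription type (the field names mirror the struct above):

type subscription struct {
	manual         bool
	topic          string
	partition      int32
	offset         int64
	connectionName string
}

// removeManual filters out the manual subscription matching all fields and
// reports whether anything was found.
func removeManual(subs []*subscription, topic string, partition int32, offset int64, name string) ([]*subscription, bool) {
	kept := subs[:0]
	found := false
	for _, s := range subs {
		if s.manual && s.topic == topic && s.partition == partition &&
			s.offset == offset && s.connectionName == name {
			found = true
			continue // drop it; a real implementation also closes its partition consumer
		}
		kept = append(kept, s)
	}
	return kept, found
}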
-type ManualConnection interface { - messaging.ProtoPartitionWatcher - // Creates new synchronous publisher allowing to publish kafka messages to chosen partition - NewSyncPublisherToPartition(topic string, partition int32) (messaging.ProtoPublisher, error) - // Creates new asynchronous publisher allowing to publish kafka messages to chosen partition - NewAsyncPublisherToPartition(topic string, partition int32, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) -} - -// ProtoConnection represents connection built on hash-mode multiplexer -type ProtoConnection struct { - ProtoConnectionFields -} - -// ProtoManualConnection represents connection built on manual-mode multiplexer -type ProtoManualConnection struct { - ProtoConnectionFields -} - -// ProtoConnectionFields is an entity that provides access to shared producers/consumers of multiplexer. The value of -// message are marshaled and unmarshaled to/from proto.message behind the scene. -type ProtoConnectionFields struct { - // multiplexer is used for access to kafka brokers - multiplexer *Multiplexer - // name identifies the connection - name string - // serializer marshals and unmarshals data to/from proto.Message - serializer keyval.Serializer -} - -type protoSyncPublisherKafka struct { - conn *ProtoConnection - topic string -} - -type protoAsyncPublisherKafka struct { - conn *ProtoConnection - topic string - succCallback func(messaging.ProtoMessage) - errCallback func(messaging.ProtoMessageErr) -} - -type protoManualSyncPublisherKafka struct { - conn *ProtoManualConnection - topic string - partition int32 -} - -type protoManualAsyncPublisherKafka struct { - conn *ProtoManualConnection - topic string - partition int32 - succCallback func(messaging.ProtoMessage) - errCallback func(messaging.ProtoMessageErr) -} - -// NewSyncPublisher creates a new instance of protoSyncPublisherKafka that allows to publish sync kafka messages using common messaging API -func (conn *ProtoConnection) NewSyncPublisher(topic string) (messaging.ProtoPublisher, error) { - return &protoSyncPublisherKafka{conn, topic}, nil -} - -// NewAsyncPublisher creates a new instance of protoAsyncPublisherKafka that allows to publish sync kafka messages using common messaging API -func (conn *ProtoConnection) NewAsyncPublisher(topic string, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) { - return &protoAsyncPublisherKafka{conn, topic, successClb, errorClb}, nil -} - -// NewSyncPublisherToPartition creates a new instance of protoManualSyncPublisherKafka that allows to publish sync kafka messages using common messaging API -func (conn *ProtoManualConnection) NewSyncPublisherToPartition(topic string, partition int32) (messaging.ProtoPublisher, error) { - return &protoManualSyncPublisherKafka{conn, topic, partition}, nil -} - -// NewAsyncPublisherToPartition creates a new instance of protoManualAsyncPublisherKafka that allows to publish sync kafka -// messages using common messaging API. -func (conn *ProtoManualConnection) NewAsyncPublisherToPartition(topic string, partition int32, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) { - return &protoManualAsyncPublisherKafka{conn, topic, partition, successClb, errorClb}, nil -} - -// Watch is an alias for ConsumeTopic method. The alias was added in order to conform to messaging.Mux interface. 
-func (conn *ProtoConnection) Watch(msgClb func(messaging.ProtoMessage), topics ...string) error { - return conn.ConsumeTopic(msgClb, topics...) -} - -// ConsumeTopic is called to start consuming given topics. -// Function can be called until the multiplexer is started, it returns an error otherwise. -// The provided channel should be buffered, otherwise messages might be lost. -func (conn *ProtoConnection) ConsumeTopic(msgClb func(messaging.ProtoMessage), topics ...string) error { - conn.multiplexer.rwlock.Lock() - defer conn.multiplexer.rwlock.Unlock() - - if conn.multiplexer.started { - return fmt.Errorf("ConsumeTopic can be called only if the multiplexer has not been started yet") - } - - byteClb := func(bm *client.ConsumerMessage) { - pm := client.NewProtoConsumerMessage(bm, conn.serializer) - msgClb(pm) - } - - for _, topic := range topics { - // check if we have already consumed the topic - var found bool - var subs *consumerSubscription - LoopSubs: - for _, subscription := range conn.multiplexer.mapping { - if subscription.manual == true { - // do not mix dynamic and manual mode - continue - } - if subscription.topic == topic { - found = true - subs = subscription - break LoopSubs - } - } - - if !found { - subs = &consumerSubscription{ - manual: false, // non-manual example - topic: topic, - connectionName: conn.name, - byteConsMsg: byteClb, - } - // subscribe new topic - conn.multiplexer.mapping = append(conn.multiplexer.mapping, subs) - } - - // add subscription to consumerList - subs.byteConsMsg = byteClb - } - - return nil -} - -// WatchPartition is an alias for ConsumePartition method. The alias was added in order to conform to -// messaging.Mux interface. -func (conn *ProtoManualConnection) WatchPartition(msgClb func(messaging.ProtoMessage), topic string, partition int32, offset int64) error { - return conn.ConsumePartition(msgClb, topic, partition, offset) -} - -// ConsumePartition is called to start consuming given topic on partition with offset -// Function can be called until the multiplexer is started, it returns an error otherwise. -// The provided channel should be buffered, otherwise messages might be lost. 
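ConsumeTopic above never hands proto-typed callbacks to the multiplexer; it wraps them into a byte-level callback so the subscription table only ever stores func(*client.ConsumerMessage). A sketch of that adaptation with stand-in types; the real code uses client.NewProtoConsumerMessage and the connection's serializer:

type consumerMessage struct {
	Topic string
	Value []byte
}

// wrapProtoHandler adapts a decoded-message handler to the byte-level
// callback shape stored in the multiplexer's subscription table.
func wrapProtoHandler(unmarshal func([]byte) (interface{}, error), handle func(interface{})) func(*consumerMessage) {
	return func(m *consumerMessage) {
		decoded, err := unmarshal(m.Value)
		if err != nil {
			return // a real implementation would log the decode error
		}
		handle(decoded)
	}
}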
-func (conn *ProtoManualConnection) ConsumePartition(msgClb func(messaging.ProtoMessage), topic string, partition int32, offset int64) error { - conn.multiplexer.rwlock.Lock() - defer conn.multiplexer.rwlock.Unlock() - var err error - - // check if we have already consumed the topic on partition and offset - var found bool - var subs *consumerSubscription - - for _, subscription := range conn.multiplexer.mapping { - if subscription.manual == false { - // do not mix dynamic and manual mode - continue - } - if subscription.topic == topic && subscription.partition == partition && subscription.offset == offset { - found = true - subs = subscription - break - } - } - - byteClb := func(bm *client.ConsumerMessage) { - pm := client.NewProtoConsumerMessage(bm, conn.serializer) - msgClb(pm) - } - - if !found { - subs = &consumerSubscription{ - manual: true, // manual example - topic: topic, - partition: partition, - offset: offset, - connectionName: conn.name, - byteConsMsg: byteClb, - } - // subscribe new topic on partition - conn.multiplexer.mapping = append(conn.multiplexer.mapping, subs) - } - - // add subscription to consumerList - subs.byteConsMsg = byteClb - - if conn.multiplexer.started { - conn.multiplexer.Infof("Starting 'post-init' manual Consumer") - subs.partitionConsumer, err = conn.StartPostInitConsumer(topic, partition, offset) - if err != nil { - return err - } - if subs.partitionConsumer == nil { - return nil - } - } - - return nil -} - -// StartPostInitConsumer allows to start a new partition consumer after mux is initialized. Created partition consumer -// is returned so it can be stored in subscription and closed if needed -func (conn *ProtoManualConnection) StartPostInitConsumer(topic string, partition int32, offset int64) (*sarama.PartitionConsumer, error) { - multiplexer := conn.multiplexer - multiplexer.WithFields(logging.Fields{"topic": topic}).Debugf("Post-init consuming started") - - if multiplexer.Consumer == nil || multiplexer.Consumer.SConsumer == nil { - multiplexer.Warn("Unable to start post-init Consumer, client not available in the mux") - return nil, nil - } - - // Consumer that reads topic/partition/offset. Throws error if offset is 'in the future' (message with offset does not exist yet) - partitionConsumer, err := multiplexer.Consumer.SConsumer.ConsumePartition(topic, partition, offset) - if err != nil { - return nil, err - } - multiplexer.Consumer.StartConsumerManualHandlers(partitionConsumer) - - return &partitionConsumer, nil -} - -// StopWatch is an alias for StopConsuming method. The alias was added in order to conform to messaging.Mux interface. -func (conn *ProtoConnectionFields) StopWatch(topic string) error { - return conn.StopConsuming(topic) -} - -// StopConsuming cancels the previously created subscription for consuming the topic. -func (conn *ProtoConnectionFields) StopConsuming(topic string) error { - return conn.multiplexer.stopConsuming(topic, conn.name) -} - -// StopWatchPartition is an alias for StopConsumingPartition method. The alias was added in order to conform to messaging.Mux interface. 
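StartPostInitConsumer above relies on sarama's ConsumePartition, which fails when the requested offset does not exist yet. A sketch of starting and draining such a consumer; the helper name is illustrative (assumes import "github.com/Shopify/sarama"):

// startPartitionConsumer begins consuming one topic/partition from a given
// offset and drains its message channel in the background.
func startPartitionConsumer(c sarama.Consumer, topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
	pc, err := c.ConsumePartition(topic, partition, offset)
	if err != nil {
		return nil, err // e.g. the offset is "in the future"
	}
	go func() {
		for msg := range pc.Messages() {
			_ = msg // hand off to the registered handlers here
		}
	}()
	return pc, nil
}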
-func (conn *ProtoConnectionFields) StopWatchPartition(topic string, partition int32, offset int64) error { - return conn.StopConsumingPartition(topic, partition, offset) -} - -// StopConsumingPartition cancels the previously created subscription for consuming the topic, partition and offset -func (conn *ProtoConnectionFields) StopConsumingPartition(topic string, partition int32, offset int64) error { - return conn.multiplexer.stopConsumingPartition(topic, partition, offset, conn.name) -} - -// Put publishes a message into kafka -func (p *protoSyncPublisherKafka) Put(key string, message proto.Message, opts ...datasync.PutOption) error { - _, err := p.conn.sendSyncMessage(p.topic, DefPartition, key, message, false) - return err -} - -// Put publishes a message into kafka -func (p *protoAsyncPublisherKafka) Put(key string, message proto.Message, opts ...datasync.PutOption) error { - return p.conn.sendAsyncMessage(p.topic, DefPartition, key, message, false, nil, p.succCallback, p.errCallback) -} - -// Put publishes a message into kafka -func (p *protoManualSyncPublisherKafka) Put(key string, message proto.Message, opts ...datasync.PutOption) error { - _, err := p.conn.sendSyncMessage(p.topic, p.partition, key, message, true) - return err -} - -// Put publishes a message into kafka -func (p *protoManualAsyncPublisherKafka) Put(key string, message proto.Message, opts ...datasync.PutOption) error { - return p.conn.sendAsyncMessage(p.topic, p.partition, key, message, true, nil, p.succCallback, p.errCallback) -} - -// MarkOffset marks the specified message as read -func (conn *ProtoConnectionFields) MarkOffset(msg messaging.ProtoMessage, metadata string) { - if conn.multiplexer != nil && conn.multiplexer.Consumer != nil { - if msg == nil { - return - } - consumerMsg := &client.ConsumerMessage{ - Topic: msg.GetTopic(), - Partition: msg.GetPartition(), - Offset: msg.GetOffset(), - } - - conn.multiplexer.Consumer.MarkOffset(consumerMsg, metadata) - } -} - -// CommitOffsets manually commits message offsets -func (conn *ProtoConnectionFields) CommitOffsets() error { - if conn.multiplexer != nil && conn.multiplexer.Consumer != nil { - return conn.multiplexer.Consumer.CommitOffsets() - } - return fmt.Errorf("cannot commit offsets, consumer not available") -} - -// sendSyncMessage sends a message using the sync API. If manual mode is chosen, the appropriate producer will be used. -func (conn *ProtoConnectionFields) sendSyncMessage(topic string, partition int32, key string, value proto.Message, manualMode bool) (offset int64, err error) { - data, err := conn.serializer.Marshal(value) - if err != nil { - return 0, err - } - - if manualMode { - msg, err := conn.multiplexer.manSyncProducer.SendMsgToPartition(topic, partition, sarama.StringEncoder(key), sarama.ByteEncoder(data)) - if err != nil { - return 0, err - } - return msg.Offset, err - } - msg, err := conn.multiplexer.hashSyncProducer.SendMsgToPartition(topic, partition, sarama.StringEncoder(key), sarama.ByteEncoder(data)) - if err != nil { - return 0, err - } - return msg.Offset, err -} - -// sendAsyncMessage sends a message using the async API. If manual mode is chosen, the appropriate producer will be used. 
-func (conn *ProtoConnectionFields) sendAsyncMessage(topic string, partition int32, key string, value proto.Message, manualMode bool, - meta interface{}, successClb func(messaging.ProtoMessage), errClb func(messaging.ProtoMessageErr)) error { - data, err := conn.serializer.Marshal(value) - if err != nil { - return err - } - succByteClb := func(msg *client.ProducerMessage) { - protoMsg := &client.ProtoProducerMessage{ - ProducerMessage: msg, - Serializer: conn.serializer, - } - successClb(protoMsg) - } - - errByteClb := func(msg *client.ProducerError) { - protoMsg := &client.ProtoProducerMessageErr{ - ProtoProducerMessage: &client.ProtoProducerMessage{ - ProducerMessage: msg.ProducerMessage, - Serializer: conn.serializer, - }, - Err: msg.Err, - } - errClb(protoMsg) - } - - if manualMode { - auxMeta := &asyncMeta{successClb: succByteClb, errorClb: errByteClb, usersMeta: meta} - conn.multiplexer.manAsyncProducer.SendMsgToPartition(topic, partition, sarama.StringEncoder(key), sarama.ByteEncoder(data), auxMeta) - return nil - } - auxMeta := &asyncMeta{successClb: succByteClb, errorClb: errByteClb, usersMeta: meta} - conn.multiplexer.hashAsyncProducer.SendMsgToPartition(topic, partition, sarama.StringEncoder(key), sarama.ByteEncoder(data), auxMeta) - return nil -} diff --git a/vendor/github.com/ligato/cn-infra/messaging/kafka/plugin_impl_kafka.go b/vendor/github.com/ligato/cn-infra/messaging/kafka/plugin_impl_kafka.go deleted file mode 100644 index 56811428c3..0000000000 --- a/vendor/github.com/ligato/cn-infra/messaging/kafka/plugin_impl_kafka.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kafka - -import ( - "fmt" - - "github.com/Shopify/sarama" - - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/flavors/local" - "github.com/ligato/cn-infra/health/statuscheck" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/messaging" - "github.com/ligato/cn-infra/messaging/kafka/client" - "github.com/ligato/cn-infra/messaging/kafka/mux" - "github.com/ligato/cn-infra/utils/clienttls" - "github.com/ligato/cn-infra/utils/safeclose" -) - -const topic = "status-check" - -// Plugin provides API for interaction with kafka brokers. -type Plugin struct { - Deps // inject - - mux *mux.Multiplexer - subscription chan *client.ConsumerMessage - - // Kafka plugin is using two clients. The first one is using 'hash' (default) partitioner. The second mux - // uses manual partitioner which allows to send a message to specified partition and watching to desired partition/offset - hsClient sarama.Client - manClient sarama.Client - - disabled bool -} - -// Deps groups dependencies injected into the plugin so that they are -// logically separated from other plugin fields. -type Deps struct { - local.PluginInfraDeps //inject -} - -// FromExistingMux is used mainly for testing purposes. 
-func FromExistingMux(mux *mux.Multiplexer) *Plugin { - return &Plugin{mux: mux} -} - -// Init is called at plugin initialization. -func (plugin *Plugin) Init() (err error) { - // Prepare topic and subscription for status check client - plugin.subscription = make(chan *client.ConsumerMessage) - - // Get muxCfg data (contains kafka brokers ip addresses) - muxCfg := &mux.Config{} - found, err := plugin.PluginConfig.GetValue(muxCfg) - if !found { - plugin.Log.Info("kafka config not found ", plugin.PluginConfig.GetConfigName(), " - skip loading this plugin") - plugin.disabled = true - return nil //skip loading the plugin - } - if err != nil { - return err - } - // retrieve clientCfg - clientCfg, err := plugin.getClientConfig(muxCfg, plugin.Log, topic) - if err != nil { - return err - } - - // init 'hash' sarama client - plugin.hsClient, err = client.NewClient(clientCfg, client.Hash) - if err != nil { - return err - } - - // init 'manual' sarama client - plugin.manClient, err = client.NewClient(clientCfg, client.Manual) - if err != nil { - return err - } - - // Initialize both multiplexers to allow both, dynamic and manual mode - if plugin.mux == nil { - name := clientCfg.GroupID - plugin.Log.Infof("Group ID is set to %v", name) - plugin.mux, err = mux.InitMultiplexerWithConfig(clientCfg, plugin.hsClient, plugin.manClient, name, plugin.Log) - if err != nil { - return err - } - plugin.Log.Debug("Default multiplexer initialized") - } - - return err -} - -// AfterInit is called in the second phase of the initialization. The kafka multiplexerNewWatcher -// is started, all consumers have to be subscribed until this phase. -func (plugin *Plugin) AfterInit() error { - if plugin.mux != nil { - err := plugin.mux.Start() - if err != nil { - return err - } - } - - // Register for providing status reports (polling mode) - if plugin.StatusCheck != nil && !plugin.disabled { - plugin.StatusCheck.Register(plugin.PluginName, func() (statuscheck.PluginState, error) { - if plugin.hsClient == nil || plugin.hsClient.Closed() { - return statuscheck.Error, fmt.Errorf("kafka client/consumer not available") - } - // Method 'RefreshMetadata()' returns error if kafka server is unavailable - err := plugin.hsClient.RefreshMetadata(topic) - if err == nil { - return statuscheck.OK, nil - } - plugin.Log.Errorf("Kafka server unavailable") - return statuscheck.Error, err - }) - } else { - plugin.Log.Warnf("Unable to start status check for kafka") - } - - return nil -} - -// Close is called at plugin cleanup phase. -func (plugin *Plugin) Close() error { - return safeclose.Close(plugin.hsClient, plugin.manClient, plugin.mux) -} - -// NewBytesConnection returns a new instance of a connection to access kafka brokers. The connection allows to create -// new kafka providers/consumers on multiplexer with hash partitioner. -func (plugin *Plugin) NewBytesConnection(name string) *mux.BytesConnectionStr { - return plugin.mux.NewBytesConnection(name) -} - -// NewBytesConnectionToPartition returns a new instance of a connection to access kafka brokers. The connection allows to create -// new kafka providers/consumers on multiplexer with manual partitioner which allows to send messages to specific partition -// in kafka cluster and watch on partition/offset. -func (plugin *Plugin) NewBytesConnectionToPartition(name string) *mux.BytesManualConnectionStr { - return plugin.mux.NewBytesManualConnection(name) -} - -// NewProtoConnection returns a new instance of a connection to access kafka brokers. 
The connection allows to create -// new kafka providers/consumers on multiplexer with hash partitioner.The connection uses proto-modelled messages. -func (plugin *Plugin) NewProtoConnection(name string) mux.Connection { - return plugin.mux.NewProtoConnection(name, &keyval.SerializerJSON{}) -} - -// NewProtoManualConnection returns a new instance of a connection to access kafka brokers. The connection allows to create -// new kafka providers/consumers on multiplexer with manual partitioner which allows to send messages to specific partition -// in kafka cluster and watch on partition/offset. The connection uses proto-modelled messages. -func (plugin *Plugin) NewProtoManualConnection(name string) mux.ManualConnection { - return plugin.mux.NewProtoManualConnection(name, &keyval.SerializerJSON{}) -} - -// NewSyncPublisher creates a publisher that allows to publish messages using synchronous API. The publisher creates -// new proto connection on multiplexer with default partitioner. -func (plugin *Plugin) NewSyncPublisher(connectionName string, topic string) (messaging.ProtoPublisher, error) { - return plugin.NewProtoConnection(connectionName).NewSyncPublisher(topic) -} - -// NewSyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using synchronous API. -// The publisher creates new proto connection on multiplexer with manual partitioner. -func (plugin *Plugin) NewSyncPublisherToPartition(connectionName string, topic string, partition int32) (messaging.ProtoPublisher, error) { - return plugin.NewProtoManualConnection(connectionName).NewSyncPublisherToPartition(topic, partition) -} - -// NewAsyncPublisher creates a publisher that allows to publish messages using asynchronous API. The publisher creates -// new proto connection on multiplexer with default partitioner. -func (plugin *Plugin) NewAsyncPublisher(connectionName string, topic string, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) { - return plugin.NewProtoConnection(connectionName).NewAsyncPublisher(topic, successClb, errorClb) -} - -// NewAsyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using asynchronous API. -// The publisher creates new proto connection on multiplexer with manual partitioner. -func (plugin *Plugin) NewAsyncPublisherToPartition(connectionName string, topic string, partition int32, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) { - return plugin.NewProtoManualConnection(connectionName).NewAsyncPublisherToPartition(topic, partition, successClb, errorClb) -} - -// NewWatcher creates a watcher that allows to start/stop consuming of messaging published to given topics. 
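The status check registered in AfterInit above treats a reachable broker as healthy: the probe fails fast when the sarama client is missing or closed, and otherwise uses a metadata refresh as the liveness round-trip. A sketch of the probe in isolation; the function name is illustrative (assumes imports "fmt" and "github.com/Shopify/sarama"):

func kafkaProbe(c sarama.Client, topic string) error {
	if c == nil || c.Closed() {
		return fmt.Errorf("kafka client/consumer not available")
	}
	// RefreshMetadata returns an error if the kafka server is unavailable
	return c.RefreshMetadata(topic)
}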
-func (plugin *Plugin) NewWatcher(name string) messaging.ProtoWatcher { - return plugin.NewProtoConnection(name) -} - -// NewPartitionWatcher creates a watcher that allows to start/stop consuming of messaging published to given topics, offset and partition -func (plugin *Plugin) NewPartitionWatcher(name string) messaging.ProtoPartitionWatcher { - return plugin.NewProtoManualConnection(name) -} - -// Disabled if the plugin config was not found -func (plugin *Plugin) Disabled() (disabled bool) { - return plugin.disabled -} - -// Receive client config according to kafka config data -func (plugin *Plugin) getClientConfig(config *mux.Config, logger logging.Logger, topic string) (*client.Config, error) { - clientCfg := client.NewConfig(logger) - // Set brokers obtained from kafka config. In case there are none available, use a default one - if len(config.Addrs) > 0 { - clientCfg.SetBrokers(config.Addrs...) - } else { - clientCfg.SetBrokers(mux.DefAddress) - } - // Set group ID obtained from kafka config. In case there is none, use a service label - if config.GroupID != "" { - clientCfg.SetGroup(config.GroupID) - } else { - clientCfg.SetGroup(plugin.ServiceLabel.GetAgentLabel()) - } - clientCfg.SetRecvMessageChan(plugin.subscription) - clientCfg.SetInitialOffset(sarama.OffsetNewest) - clientCfg.SetTopics(topic) - if config.TLS.Enabled { - plugin.Log.Info("TLS enabled") - tlsConfig, err := clienttls.CreateTLSConfig(config.TLS) - if err != nil { - return nil, err - } - clientCfg.SetTLS(tlsConfig) - } - return clientCfg, nil -} diff --git a/vendor/github.com/ligato/cn-infra/rpc/grpc/config.go b/vendor/github.com/ligato/cn-infra/rpc/grpc/config.go index f9bf4ee0dc..621d541b08 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/grpc/config.go +++ b/vendor/github.com/ligato/cn-infra/rpc/grpc/config.go @@ -19,8 +19,9 @@ import ( "strings" "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" + "github.com/ligato/cn-infra/infra" "github.com/namsral/flag" + "google.golang.org/grpc" ) // Config is a configuration for GRPC netListener @@ -53,6 +54,16 @@ type Config struct { //TODO TLS/credentials } +func (cfg *Config) getGrpcOptions() (opts []grpc.ServerOption) { + if cfg.MaxConcurrentStreams > 0 { + opts = append(opts, grpc.MaxConcurrentStreams(cfg.MaxConcurrentStreams)) + } + if cfg.MaxMsgSize > 0 { + opts = append(opts, grpc.MaxMsgSize(cfg.MaxMsgSize)) + } + return +} + // GetPort parses suffix from endpoint & returns integer after last ":" (otherwise it returns 0) func (cfg *Config) GetPort() int { if cfg.Endpoint != "" && cfg.Endpoint != ":" { @@ -69,7 +80,7 @@ func (cfg *Config) GetPort() int { } // DeclareGRPCPortFlag declares GRPC port (with usage & default value) a flag for a particular plugin name -func DeclareGRPCPortFlag(pluginName core.PluginName) { +func DeclareGRPCPortFlag(pluginName infra.PluginName) { plugNameUpper := strings.ToUpper(string(pluginName)) usage := "Configure Agent' " + plugNameUpper + " net listener (port & timeouts); also set via '" + @@ -77,6 +88,6 @@ func DeclareGRPCPortFlag(pluginName core.PluginName) { flag.String(grpcPortFlag(pluginName), "", usage) } -func grpcPortFlag(pluginName core.PluginName) string { +func grpcPortFlag(pluginName infra.PluginName) string { return strings.ToLower(string(pluginName)) + "-port" } diff --git a/vendor/github.com/ligato/cn-infra/rpc/grpc/grpc.conf b/vendor/github.com/ligato/cn-infra/rpc/grpc/grpc.conf index 85a62581a4..ae1f338071 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/grpc/grpc.conf +++
b/vendor/github.com/ligato/cn-infra/rpc/grpc/grpc.conf @@ -2,17 +2,18 @@ endpoint: 0.0.0.0:9111 # If unix domain socket file is used for GRPC communication, permissions to the file can be set here. -permission: 740 +# Permission value uses the standard three-or-four digit Linux octal notation. +permission: 000 # If socket file exists in defined path, it is not removed by default, GRPC plugin tries to use it. # Setting the force removal flag to 'true' ensures that the socket file will always be re-created force-socket-removal: false -# Available socket types: tcp, tcp4, tcp6, unix, unixpacket +# Available socket types: tcp, tcp4, tcp6, unix, unixpacket. If not set, defaults to tcp. network: tcp # Maximum message size in bytes for inbound messages. If not set, GRPC uses the default 4MB. max-msg-size: 4096 # Limit of server streams to each server transport. -max-concurrent-streams: 2 +max-concurrent-streams: 0 diff --git a/vendor/github.com/ligato/cn-infra/rpc/grpc/listen_and_serve.go b/vendor/github.com/ligato/cn-infra/rpc/grpc/listen_and_serve.go index d5d651380d..91e5347bcb 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/grpc/listen_and_serve.go +++ b/vendor/github.com/ligato/cn-infra/rpc/grpc/listen_and_serve.go @@ -18,11 +18,10 @@ import ( "fmt" "io" "net" - "time" - "os" "strconv" "strings" + "time" "google.golang.org/grpc" ) @@ -40,7 +39,6 @@ func FromExistingServer(listenAndServe ListenAndServe) *Plugin { // ListenAndServeGRPC starts a netListener. func ListenAndServeGRPC(config *Config, grpcServer *grpc.Server) (netListener net.Listener, err error) { - // Default to tcp socket type of not specified for backward compatibility socketType := config.Network if socketType == "" { diff --git a/vendor/github.com/ligato/cn-infra/rpc/grpc/options.go b/vendor/github.com/ligato/cn-infra/rpc/grpc/options.go new file mode 100644 index 0000000000..5e92867677 --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/rpc/grpc/options.go @@ -0,0 +1,69 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpc + +import ( + "github.com/ligato/cn-infra/config" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/rpc/rest" +) + +// DefaultPlugin is a default instance of Plugin. +var DefaultPlugin = *NewPlugin() + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "grpc" + //p.HTTP= &rest.DefaultPlugin // turned off by default + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + if p.Deps.PluginConfig == nil { + p.Deps.PluginConfig = config.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Plugin) + +// UseConf returns Option which injects a particular configuration.
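getGrpcOptions above translates config fields into grpc.ServerOption values; the two limits are independent, so a config may set either or both. A sketch of consuming the derived options (the helper name is illustrative; assumes import "google.golang.org/grpc"):

// newServerFromConfig builds the gRPC server with whatever limits the
// config declares; unset (zero) values simply produce no option.
func newServerFromConfig(cfg *Config) *grpc.Server {
	return grpc.NewServer(cfg.getGrpcOptions()...)
}

// Example: limit concurrent streams only, keeping the default message size.
var cfg = &Config{MaxConcurrentStreams: 16}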
+func UseConf(conf Config) Option { + return func(p *Plugin) { + p.Config = &conf + } +} + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} + +// UseHTTP returns Option that sets HTTP handlers. +func UseHTTP(h rest.HTTPHandlers) Option { + return func(p *Plugin) { + p.Deps.HTTP = h + } +} diff --git a/vendor/github.com/ligato/cn-infra/rpc/grpc/plugin_impl_grpc.go b/vendor/github.com/ligato/cn-infra/rpc/grpc/plugin_impl_grpc.go index 8abb846467..72d67b4b2a 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/grpc/plugin_impl_grpc.go +++ b/vendor/github.com/ligato/cn-infra/rpc/grpc/plugin_impl_grpc.go @@ -17,12 +17,9 @@ package grpc import ( "io" "net/http" - "strconv" - "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/cn-infra/utils/safeclose" "github.com/unrolled/render" @@ -33,8 +30,12 @@ import ( // Plugin maintains the GRPC netListener (see Init, AfterInit, Close methods) type Plugin struct { Deps - // Stored GRPC config (used in example) + *Config + + option Option + //Option func(*Plugin) + // GRPC server instance grpcServer *grpc.Server // Used mainly for testing purposes @@ -47,15 +48,12 @@ type Plugin struct { // Deps is a list of injected dependencies of the GRPC plugin. type Deps struct { - Log logging.PluginLogger - PluginName core.PluginName - HTTP rest.HTTPHandlers - config.PluginConfig + infra.Deps + HTTP rest.HTTPHandlers } // Init prepares GRPC netListener for registration of individual service -func (plugin *Plugin) Init() error { - var err error +func (plugin *Plugin) Init() (err error) { // Get GRPC configuration file if plugin.Config == nil { plugin.Config, err = plugin.getGrpcConfig() @@ -66,14 +64,7 @@ func (plugin *Plugin) Init() error { // Prepare GRPC server if plugin.grpcServer == nil { - var opts []grpc.ServerOption - if plugin.Config.MaxConcurrentStreams > 0 { - opts = append(opts, grpc.MaxConcurrentStreams(plugin.Config.MaxConcurrentStreams)) - } - if plugin.Config.MaxMsgSize > 0 { - opts = append(opts, grpc.MaxMsgSize(plugin.Config.MaxMsgSize)) - } - + opts := plugin.Config.getGrpcOptions() plugin.grpcServer = grpc.NewServer(opts...) grpclog.SetLogger(plugin.Log.NewLogger("grpc-server")) } @@ -91,12 +82,16 @@ func (plugin *Plugin) Init() error { // AfterInit starts the HTTP netListener. 
func (plugin *Plugin) AfterInit() (err error) { + //plugin.Log.Debugf("GRPC AfterInit()") + if plugin.Deps.HTTP != nil { - plugin.Log.Info("exposing GRPC services over HTTP port " + strconv.Itoa(plugin.Deps.HTTP.GetPort()) + - " /service ") - plugin.Deps.HTTP.RegisterHTTPHandler("service", func(formatter *render.Render) http.HandlerFunc { + plugin.Log.Infof("exposing GRPC services via HTTP (port %v) on: /service", + plugin.Deps.HTTP.GetPort()) + plugin.Deps.HTTP.RegisterHTTPHandler("/service", func(formatter *render.Render) http.HandlerFunc { return plugin.grpcServer.ServeHTTP }, "GET", "PUT", "POST") + } else { + plugin.Log.Infof("HTTP not set, skip exposing GRPC services") } return err @@ -124,14 +119,6 @@ func (plugin *Plugin) IsDisabled() (disabled bool) { return plugin.disabled } -// String returns plugin name (if not set defaults to "HTTP") -func (plugin *Plugin) String() string { - if plugin.Deps.PluginName != "" { - return string(plugin.Deps.PluginName) - } - return "GRPC" -} - func (plugin *Plugin) getGrpcConfig() (*Config, error) { var grpcCfg Config found, err := plugin.PluginConfig.GetValue(&grpcCfg) diff --git a/vendor/github.com/ligato/cn-infra/rpc/prometheus/options.go b/vendor/github.com/ligato/cn-infra/rpc/prometheus/options.go new file mode 100644 index 0000000000..42e6553def --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/rpc/prometheus/options.go @@ -0,0 +1,37 @@ +package prometheus + +import ( + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/rpc/rest" +) + +// DefaultPlugin is a default instance of Plugin. +var DefaultPlugin = *NewPlugin() + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "prometheus" + p.HTTP = &rest.DefaultPlugin + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Plugin) + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} diff --git a/vendor/github.com/ligato/cn-infra/rpc/prometheus/plugin_impl_prometheus.go b/vendor/github.com/ligato/cn-infra/rpc/prometheus/plugin_impl_prometheus.go index e4201aabaa..5d56c9af1e 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/prometheus/plugin_impl_prometheus.go +++ b/vendor/github.com/ligato/cn-infra/rpc/prometheus/plugin_impl_prometheus.go @@ -16,14 +16,16 @@ package prometheus import ( "errors" - "github.com/ligato/cn-infra/flavors/local" + "net/http" + "strings" + "sync" + + "github.com/ligato/cn-infra/infra" + "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/rpc/rest" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/unrolled/render" - "net/http" - "strings" - "sync" ) // DefaultRegistry default Prometheus metrics URL @@ -41,6 +43,7 @@ var ( // Plugin struct holds all plugin-related data. type Plugin struct { Deps + sync.Mutex // regs is a map of URL path (symbolic names) to registries. Registries group metrics and can be exposed at different URLs. regs map[string]*registry @@ -48,7 +51,8 @@ // Deps lists dependencies of the plugin.
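The AfterInit hunk above mounts the gRPC server on the shared HTTP plugin; this works because *grpc.Server implements http.Handler. A sketch of the same wiring against a plain http.ServeMux (helper name is illustrative; assumes imports "net/http" and "google.golang.org/grpc"; note that gRPC over this path requires HTTP/2):

func mountGRPC(mux *http.ServeMux, srv *grpc.Server) {
	// every request on the "/service" path is delegated to the gRPC server
	mux.Handle("/service", srv)
}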
type Deps struct { - local.PluginInfraDeps // inject + infra.PluginName + Log logging.PluginLogger // HTTP server used to expose metrics HTTP rest.HTTPHandlers // inject } @@ -61,8 +65,7 @@ type registry struct { } // Init initializes the internal structures -func (p *Plugin) Init() (err error) { - +func (p *Plugin) Init() error { p.regs = map[string]*registry{} // add default registry diff --git a/vendor/github.com/ligato/cn-infra/rpc/rest/auth.go b/vendor/github.com/ligato/cn-infra/rpc/rest/auth.go new file mode 100644 index 0000000000..16718268fc --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/rpc/rest/auth.go @@ -0,0 +1,68 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import ( + "fmt" + "net/http" + "strings" +) + +// BasicHTTPAuthenticator is a delegate that implements basic HTTP authentication +type BasicHTTPAuthenticator interface { + // Authenticate returns true if user is authenticated successfully, false otherwise. + Authenticate(user string, pass string) bool +} + +func auth(fn http.HandlerFunc, auth BasicHTTPAuthenticator) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + user, pass, _ := r.BasicAuth() + if !auth.Authenticate(user, pass) { + w.Header().Set("WWW-Authenticate", "Provide valid username and password") + http.Error(w, "Unauthorized.", http.StatusUnauthorized) + return + } + fn(w, r) + } +} + +// staticAuthenticator is default implementation of BasicHTTPAuthenticator +type staticAuthenticator struct { + credentials map[string]string +} + +// newStaticAuthenticator creates new instance of static authenticator. +// Argument `users` is a slice of colon-separated username and password couples. +func newStaticAuthenticator(users []string) (*staticAuthenticator, error) { + sa := &staticAuthenticator{credentials: map[string]string{}} + for _, u := range users { + fields := strings.Split(u, ":") + if len(fields) != 2 { + return nil, fmt.Errorf("invalid format of basic auth entry '%v' expected 'user:pass'", u) + } + sa.credentials[fields[0]] = fields[1] + } + return sa, nil +} + +// Authenticate looks up the given user name and password in the internal map. +// If match is found returns true, false otherwise. 
+func (sa *staticAuthenticator) Authenticate(user string, pass string) bool { + password, found := sa.credentials[user] + if !found { + return false + } + return pass == password +} diff --git a/vendor/github.com/ligato/cn-infra/rpc/rest/config.go b/vendor/github.com/ligato/cn-infra/rpc/rest/config.go index 4a73386a5b..95b69418cc 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/rest/config.go +++ b/vendor/github.com/ligato/cn-infra/rpc/rest/config.go @@ -20,43 +20,18 @@ import ( "time" "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" + "github.com/ligato/cn-infra/infra" "github.com/namsral/flag" ) -// PluginConfig tries : -// - to load flag -port and then FixConfig() just in case -// - alternatively -config and then FixConfig() just in case -// - alternatively DefaultConfig() -func PluginConfig(pluginCfg config.PluginConfig, cfg *Config, pluginName core.PluginName) error { - portFlag := flag.Lookup(httpPortFlag(pluginName)) - if portFlag != nil && portFlag.Value != nil && portFlag.Value.String() != "" && cfg != nil { - cfg.Endpoint = DefaultIP + ":" + portFlag.Value.String() - } - - if pluginCfg != nil { - _, err := pluginCfg.GetValue(cfg) - if err != nil { - return err - } - } - - FixConfig(cfg) - - return nil -} - -// DefaultConfig returns new instance of config with default endpoint -func DefaultConfig() *Config { - return &Config{Endpoint: DefaultEndpoint} -} - -// FixConfig fill default values for empty fields -func FixConfig(cfg *Config) { - if cfg != nil && cfg.Endpoint == "" { - cfg.Endpoint = DefaultEndpoint - } -} +const ( + // DefaultHost is a host used by default + DefaultHost = "0.0.0.0" + // DefaultHTTPPort is a port used by default + DefaultHTTPPort = "9191" + // DefaultEndpoint 0.0.0.0:9191 + DefaultEndpoint = DefaultHost + ":" + DefaultHTTPPort +) // Config is a configuration for HTTP server // It is meant to be extended with security (TLS...) 
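The auth.go pieces added above compose directly: newStaticAuthenticator parses "user:pass" entries and auth wraps any handler with a basic-auth check. A sketch of wiring them together, with example credentials (assumes import "net/http"):

func protectedHandler() (http.HandlerFunc, error) {
	authn, err := newStaticAuthenticator([]string{"alice:s3cret"}) // example credentials
	if err != nil {
		return nil, err // malformed entry, e.g. missing the colon
	}
	hello := func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	}
	// unauthenticated requests get 401 before hello ever runs
	return auth(hello, authn), nil
}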
@@ -115,6 +90,46 @@ ClientCerts []string `json:"client-cert-files"` } +// DefaultConfig returns new instance of config with default endpoint +func DefaultConfig() *Config { + return &Config{ + Endpoint: DefaultEndpoint, + } +} + +// PluginConfig tries: +// - to load flag -port and then FixConfig() just in case +// - alternatively -config and then FixConfig() just in case +// - alternatively DefaultConfig() +func PluginConfig(pluginCfg config.PluginConfig, cfg *Config, pluginName infra.PluginName) error { + portFlag := flag.Lookup(httpPortFlag(pluginName)) + + if portFlag != nil && portFlag.Value != nil && portFlag.Value.String() != "" && cfg != nil { + cfg.Endpoint = DefaultHost + ":" + portFlag.Value.String() + } + + if pluginCfg != nil { + _, err := pluginCfg.GetValue(cfg) + if err != nil { + return err + } + } + + FixConfig(cfg) + + return nil +} + +// FixConfig fills default values for empty fields +func FixConfig(cfg *Config) { + if cfg == nil { + return + } + if cfg.Endpoint == "" { + cfg.Endpoint = DefaultEndpoint + } +} + // GetPort parses suffix from endpoint & returns integer after last ":" (otherwise it returns 0) func (cfg *Config) GetPort() int { if cfg.Endpoint != "" && cfg.Endpoint != ":" { @@ -136,7 +151,7 @@ func (cfg *Config) UseHTTPS() bool { } // DeclareHTTPPortFlag declares http port (with usage & default value) a flag for a particular plugin name -func DeclareHTTPPortFlag(pluginName core.PluginName, defaultPortOpts ...uint) { +func DeclareHTTPPortFlag(pluginName infra.PluginName, defaultPortOpts ...uint) { var defaultPort string if len(defaultPortOpts) > 0 { defaultPort = string(defaultPortOpts[0]) @@ -151,6 +166,6 @@ func DeclareHTTPPortFlag(pluginName core.PluginName, defaultPortOpts ...uint) { flag.String(httpPortFlag(pluginName), defaultPort, usage) } -func httpPortFlag(pluginName core.PluginName) string { +func httpPortFlag(pluginName infra.PluginName) string { return strings.ToLower(string(pluginName)) + "-port" } diff --git a/vendor/github.com/ligato/cn-infra/rpc/rest/http.conf b/vendor/github.com/ligato/cn-infra/rpc/rest/http.conf index f600ee49b6..542dfe8f5c 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/rest/http.conf +++ b/vendor/github.com/ligato/cn-infra/rpc/rest/http.conf @@ -3,21 +3,23 @@ endpoint: 0.0.0.0:9191 # Maximum duration for reading the entire request, including the body. Because read timeout does not let handlers # make per-request decisions on each request body's acceptable deadline or upload rate, most users will prefer -# to use read-header-timeout. It is valid to use them both. -read-timeout: 10 +# to use read-header-timeout. It is valid to use them both. Read timeout is set in nanoseconds. +read-timeout: 0 # Header timeout is the amount of time allowed to read request headers. The connection's read deadline is reset -# after reading the headers and the Handler can decide what is considered too slow for the body. -read-header-timeout: 10 +# after reading the headers and the Handler can decide what is considered too slow for the body. Read header +# timeout is set in nanoseconds. +read-header-timeout: 0 # WriteTimeout is the maximum duration before timing out writes of the response. It is reset whenever a new # request's header is read. Like ReadTimeout, it does not let Handlers make decisions on a per-request basis. -write-timeout: 10 +# Write timeout is set in nanoseconds. +write-timeout: 0 # Maximum amount of time to wait for the next request when keep-alives are enabled.
If idle timeout is zero, -# the value of ReadTimeout is used. If both are zero, there is no timeout. -idle-timeout: 150 +# the value of ReadTimeout is used. If both are zero, there is no timeout. Idle timeout is set in nanoseconds. +idle-timeout: 0 # Field controls the maximum number of bytes the server will read parsing the request header's keys and values, # including the request line. It does not limit the size of the request body. -max-header-bytes: 150 \ No newline at end of file +max-header-bytes: 0 \ No newline at end of file diff --git a/vendor/github.com/ligato/cn-infra/rpc/rest/options.go b/vendor/github.com/ligato/cn-infra/rpc/rest/options.go new file mode 100644 index 0000000000..aa04a8ef5d --- /dev/null +++ b/vendor/github.com/ligato/cn-infra/rpc/rest/options.go @@ -0,0 +1,72 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import ( + "fmt" + + "github.com/ligato/cn-infra/config" + "github.com/ligato/cn-infra/logging" +) + +// DefaultPlugin is a default instance of Plugin. +var DefaultPlugin = *NewPlugin() + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "http" + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + if p.Deps.PluginConfig == nil { + p.Deps.PluginConfig = config.ForPlugin(p.String(), func(flags *config.FlagSet) { + flags.String(httpPortFlag(p.PluginName), DefaultHTTPPort, + fmt.Sprintf("Configure %q server port", p.String())) + }) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Plugin) + +// UseConf returns Option which injects a particular configuration. +func UseConf(conf Config) Option { + return func(p *Plugin) { + p.Config = &conf + } +} + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} + +// UseAuthenticator returns an Option which sets HTTP Authenticator. 
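PluginConfig and FixConfig above resolve the endpoint in stages: the -port flag pre-seeds it, a value from the config file then overrides it, and FixConfig falls back to the default only when neither set one. The same logic in isolation (function name is illustrative; DefaultHost and DefaultEndpoint are the constants added above):

func resolveEndpoint(portFlag, fromFile string) string {
	endpoint := ""
	if portFlag != "" {
		endpoint = DefaultHost + ":" + portFlag // pre-seeded from the flag
	}
	if fromFile != "" {
		endpoint = fromFile // the config file value wins when present
	}
	if endpoint == "" {
		endpoint = DefaultEndpoint // FixConfig's fallback, 0.0.0.0:9191
	}
	return endpoint
}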
+func UseAuthenticator(a BasicHTTPAuthenticator) Option { + return func(p *Plugin) { + p.Deps.Authenticator = a + } +} diff --git a/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_fork.go b/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_fork.go index 10f270668a..3e15686ade 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_fork.go +++ b/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_fork.go @@ -20,7 +20,7 @@ import ( "github.com/gorilla/mux" "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging" "github.com/unrolled/render" ) @@ -28,6 +28,8 @@ import ( // ForkPlugin checks the configuration and based on this it // delegates API calls to new instance or existing instance of HTTP server type ForkPlugin struct { + // TODO: merge this with normal http plugin + Deps ForkDeps *Config @@ -48,10 +50,18 @@ type ForkDeps struct { DefaultHTTP HTTPHandlers //inject Log logging.PluginLogger //inject - PluginName core.PluginName //inject + PluginName infra.PluginName //inject config.PluginConfig //inject } +// String returns plugin name (if not set defaults to "HTTP-FORK") +func (plugin *ForkPlugin) String() string { + if plugin.Deps.PluginName != "" { + return string(plugin.Deps.PluginName) + } + return "HTTP-FORK" +} + // Init checks the config and, if the port is different, creates a new HTTP server func (plugin *ForkPlugin) Init() (err error) { if plugin.Config == nil { @@ -65,11 +75,12 @@ func (plugin *ForkPlugin) Init() (err error) { plugin.Deps.Log.WithField("probePort", probePort).Info("init") if probePort > 0 && probePort != plugin.Deps.DefaultHTTP.GetPort() { childPlugNameHTTP := plugin.String() + "-HTTP" - plugin.newPlugin = &Plugin{Deps: Deps{ - Log: logging.ForPlugin(childPlugNameHTTP, plugin.Deps.Log), - PluginName: core.PluginName(childPlugNameHTTP), - }, Config: plugin.Config, - } + plug := &Plugin{} + plug.Log = logging.ForPlugin(childPlugNameHTTP) + plug.PluginName = infra.PluginName(childPlugNameHTTP) + plug.Config = plugin.Config + + plugin.newPlugin = plug plugin.delegate = plugin.newPlugin } else { @@ -83,6 +94,24 @@ func (plugin *ForkPlugin) Init() (err error) { return err } +// AfterInit starts the HTTP server. +// (only if port was different in Init()) +func (plugin *ForkPlugin) AfterInit() error { + if plugin.newPlugin != nil { + return plugin.newPlugin.AfterInit() + } + return nil +} + +// Close stops the HTTP server. +// (only if port was different in Init()) +func (plugin *ForkPlugin) Close() error { + if plugin.newPlugin != nil { + return plugin.newPlugin.Close() + } + return nil +} + // RegisterHTTPHandler registers an HTTP handler at the given path. // (delegated call) func (plugin *ForkPlugin) RegisterHTTPHandler(path string, @@ -105,29 +134,3 @@ func (plugin *ForkPlugin) GetPort() int { } return 0 } - -// AfterInit starts the HTTP server. -// (only if port was different in Init()) -func (plugin *ForkPlugin) AfterInit() error { - if plugin.newPlugin != nil { - return plugin.newPlugin.AfterInit() - } - return nil - } - -// Close stops the HTTP server.
-// (only if port was different in Init()) -func (plugin *ForkPlugin) Close() error { - if plugin.newPlugin != nil { - return plugin.newPlugin.Close() - } - return nil -} - -// String returns plugin name (if not set defaults to "HTTP-FORK") -func (plugin *ForkPlugin) String() string { - if plugin.Deps.PluginName != "" { - return string(plugin.Deps.PluginName) - } - return "HTTP-FORK" -} diff --git a/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_rest.go b/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_rest.go index 8b28d9c332..6dd6ef24e5 100644 --- a/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_rest.go +++ b/vendor/github.com/ligato/cn-infra/rpc/rest/plugin_impl_rest.go @@ -15,38 +15,20 @@ package rest import ( - "fmt" "io" "net/http" - "strings" "github.com/gorilla/mux" "github.com/unrolled/render" - "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/core" - "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/utils/safeclose" ) -const ( - // DefaultHTTPPort is used during HTTP server startup unless different port was configured - DefaultHTTPPort = "9191" - // DefaultIP 0.0.0.0 - DefaultIP = "0.0.0.0" - // DefaultEndpoint 0.0.0.0:9191 - DefaultEndpoint = DefaultIP + ":" + DefaultHTTPPort -) - -// BasicHTTPAuthenticator is a delegate that implements basic HTTP authentication -type BasicHTTPAuthenticator interface { - // Authenticate returns true if user is authenticated successfully, false otherwise. - Authenticate(user string, pass string) bool -} - // Plugin struct holds all plugin-related data. type Plugin struct { Deps + *Config // Used mainly for testing purposes @@ -59,14 +41,13 @@ type Plugin struct { // Deps lists the dependencies of the Rest plugin. type Deps struct { - Log logging.PluginLogger //inject - PluginName core.PluginName //inject + infra.Deps + // Authenticator can be injected in a flavor inject method. // If there is no authenticator injected and config contains // user password, the default staticAuthenticator is instantiated. // By default the authenticator is disabled. - Authenticator BasicHTTPAuthenticator //inject - config.PluginConfig //inject + Authenticator BasicHTTPAuthenticator //inject } // Init is the plugin entry point called by Agent Core @@ -96,27 +77,6 @@ func (plugin *Plugin) Init() (err error) { return err } -// RegisterHTTPHandler registers HTTP at the given . -func (plugin *Plugin) RegisterHTTPHandler(path string, - handler func(formatter *render.Render) http.HandlerFunc, - methods ...string) *mux.Route { - plugin.Log.Debug("Register handler ", path) - - if plugin.Authenticator != nil { - return plugin.mx.HandleFunc(path, auth(handler(plugin.formatter), plugin.Authenticator)).Methods(methods...) - } - return plugin.mx.HandleFunc(path, handler(plugin.formatter)).Methods(methods...) - -} - -// GetPort returns plugin configuration port -func (plugin *Plugin) GetPort() int { - if plugin.Config != nil { - return plugin.Config.GetPort() - } - return 0 -} - // AfterInit starts the HTTP server. func (plugin *Plugin) AfterInit() (err error) { cfgCopy := *plugin.Config @@ -136,56 +96,28 @@ func (plugin *Plugin) AfterInit() (err error) { return err } -// Close stops the HTTP server. 
-func (plugin *Plugin) Close() error {
-	return safeclose.Close(plugin.server)
-}
-
-// String returns plugin name (if not set defaults to "HTTP")
-func (plugin *Plugin) String() string {
-	if plugin.Deps.PluginName != "" {
-		return string(plugin.Deps.PluginName)
-	}
-	return "HTTP"
-}
+// RegisterHTTPHandler registers an HTTP handler at the given path.
+func (plugin *Plugin) RegisterHTTPHandler(path string,
+	handler func(formatter *render.Render) http.HandlerFunc,
+	methods ...string) *mux.Route {
+	plugin.Log.Debug("Registering handler: ", path)
 
-func auth(fn http.HandlerFunc, auth BasicHTTPAuthenticator) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		user, pass, _ := r.BasicAuth()
-		if !auth.Authenticate(user, pass) {
-			w.Header().Set("WWW-Authenticate", "Provide valid username and password")
-			http.Error(w, "Unauthorized.", http.StatusUnauthorized)
-			return
-		}
-		fn(w, r)
+	if plugin.Authenticator != nil {
+		return plugin.mx.HandleFunc(path, auth(handler(plugin.formatter), plugin.Authenticator)).Methods(methods...)
 	}
-}
+	return plugin.mx.HandleFunc(path, handler(plugin.formatter)).Methods(methods...)
 
-// staticAuthenticator is default implementation of BasicHTTPAuthenticator
-type staticAuthenticator struct {
-	credentials map[string]string
 }
 
-// newStaticAuthenticator creates new instance of static authenticator.
-// Argument `users` is a slice of colon-separated username and password couples.
-func newStaticAuthenticator(users []string) (*staticAuthenticator, error) {
-	sa := &staticAuthenticator{credentials: map[string]string{}}
-	for _, u := range users {
-		fields := strings.Split(u, ":")
-		if len(fields) != 2 {
-			return nil, fmt.Errorf("invalid format of basic auth entry '%v' expected 'user:pass'", u)
-		}
-		sa.credentials[fields[0]] = fields[1]
+// GetPort returns plugin configuration port
+func (plugin *Plugin) GetPort() int {
+	if plugin.Config != nil {
+		return plugin.Config.GetPort()
 	}
-	return sa, nil
+	return 0
 }
 
-// Authenticate looks up the given user name and password in the internal map.
-// If match is found returns true, false otherwise.
-func (sa *staticAuthenticator) Authenticate(user string, pass string) bool {
-	password, found := sa.credentials[user]
-	if !found {
-		return false
-	}
-	return pass == password
+// Close stops the HTTP server.
+func (plugin *Plugin) Close() error {
+	return safeclose.Close(plugin.server)
 }
diff --git a/vendor/github.com/ligato/cn-infra/servicelabel/options.go b/vendor/github.com/ligato/cn-infra/servicelabel/options.go
new file mode 100644
index 0000000000..2ac90a69fa
--- /dev/null
+++ b/vendor/github.com/ligato/cn-infra/servicelabel/options.go
@@ -0,0 +1,20 @@
+package servicelabel
+
+// DefaultPlugin is a default instance of Plugin.
+var DefaultPlugin = *NewPlugin()
+
+// NewPlugin creates a new Plugin with the provided Options.
+func NewPlugin(opts ...Option) *Plugin {
+	p := &Plugin{}
+
+	p.PluginName = "service-label"
+
+	for _, o := range opts {
+		o(p)
+	}
+
+	return p
+}
+
+// Option is a function that can be used in NewPlugin to customize Plugin.
+type Option func(*Plugin) diff --git a/vendor/github.com/ligato/cn-infra/servicelabel/plugin_impl_servicelabel.go b/vendor/github.com/ligato/cn-infra/servicelabel/plugin_impl_servicelabel.go index 90c3704282..0956b1479d 100644 --- a/vendor/github.com/ligato/cn-infra/servicelabel/plugin_impl_servicelabel.go +++ b/vendor/github.com/ligato/cn-infra/servicelabel/plugin_impl_servicelabel.go @@ -17,12 +17,14 @@ package servicelabel import ( "fmt" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging/logrus" "github.com/namsral/flag" ) // Plugin exposes the service label(i.e. the string used to identify the particular VNF) to the other plugins. type Plugin struct { + infra.PluginName // MicroserviceLabel identifies particular VNF. // Used primarily as a key prefix to ETCD data store. MicroserviceLabel string diff --git a/vendor/github.com/ligato/cn-infra/utils/clienttls/doc.go b/vendor/github.com/ligato/cn-infra/utils/clienttls/doc.go deleted file mode 100644 index 8d3944babb..0000000000 --- a/vendor/github.com/ligato/cn-infra/utils/clienttls/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clienttls provides tls utilities. -package clienttls diff --git a/vendor/github.com/ligato/cn-infra/utils/clienttls/tlsutil.go b/vendor/github.com/ligato/cn-infra/utils/clienttls/tlsutil.go deleted file mode 100644 index 3b81e1eb61..0000000000 --- a/vendor/github.com/ligato/cn-infra/utils/clienttls/tlsutil.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clienttls provides tls utilities. 
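A usage sketch for the functional options added above. NewPlugin, Option, and the exported MicroserviceLabel field all come from this patch; UseLabel is a hypothetical custom option written only for illustration.

```go
package main

import (
	"fmt"

	"github.com/ligato/cn-infra/servicelabel"
)

// UseLabel is a hypothetical Option (not part of this patch) that
// pre-sets the microservice label on the plugin.
func UseLabel(label string) servicelabel.Option {
	return func(p *servicelabel.Plugin) {
		p.MicroserviceLabel = label
	}
}

func main() {
	// NewPlugin applies each Option to a freshly created Plugin.
	p := servicelabel.NewPlugin(UseLabel("vpp1"))
	fmt.Println(p.MicroserviceLabel) // vpp1
}
```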
-package clienttls - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "github.com/coreos/etcd/pkg/tlsutil" -) - -// TLS stores the client side TLS settings -type TLS struct { - Enabled bool `json:"enabled"` // enable/disable TLS - SkipVerify bool `json:"skip-verify"` // whether to skip verification of server name & certificate - Certfile string `json:"cert-file"` // client certificate - Keyfile string `json:"key-file"` // client private key - CAfile string `json:"ca-file"` // certificate authority -} - -// CreateTLSConfig used to generate the crypto/tls Config -func CreateTLSConfig(config TLS) (*tls.Config, error) { - var ( - cert *tls.Certificate - cp *x509.CertPool - err error - ) - if config.Certfile != "" && config.Keyfile != "" { - cert, err = tlsutil.NewCert(config.Certfile, config.Keyfile, nil) - if err != nil { - return nil, fmt.Errorf("tlsutil.NewCert() failed: %s", err) - } - } - - if config.CAfile != "" { - cp, err = tlsutil.NewCertPool([]string{config.CAfile}) - if err != nil { - return nil, fmt.Errorf("tlsutil.NewCertPool() failed: %s", err) - } - } - - tlsConfig := &tls.Config{ - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: config.SkipVerify, - RootCAs: cp, - } - if cert != nil { - tlsConfig.Certificates = []tls.Certificate{*cert} - } - - return tlsConfig, nil -} diff --git a/vendor/github.com/ligato/cn-infra/db/sql/plugin_api_sql.go b/vendor/github.com/ligato/cn-infra/utils/once/return_error.go similarity index 55% rename from vendor/github.com/ligato/cn-infra/db/sql/plugin_api_sql.go rename to vendor/github.com/ligato/cn-infra/utils/once/return_error.go index bc3ac1b927..99d3ede91e 100644 --- a/vendor/github.com/ligato/cn-infra/db/sql/plugin_api_sql.go +++ b/vendor/github.com/ligato/cn-infra/utils/once/return_error.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. +// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,10 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -package sql +package once -// BrokerPlugin provides unifying interface for different SQL-like datastore implementations. -type BrokerPlugin interface { - // NewBroker returns a Broker instance that works with Data Base. - NewBroker() Broker +import "sync" + +// ReturnError is a wrapper around sync.Once that properly handles: +// func() error +// instead of just +// func() +type ReturnError struct { + once sync.Once + err error +} + +// Do provides the same functionality as sync.Once.Do(func()) but for +// func() error +func (owe *ReturnError) Do(f func() error) error { + owe.once.Do(func() { + owe.err = f() + }) + return owe.err } diff --git a/vendor/github.com/ligato/cn-infra/utils/structs/doc.go b/vendor/github.com/ligato/cn-infra/utils/structs/doc.go deleted file mode 100644 index f3cfd899ae..0000000000 --- a/vendor/github.com/ligato/cn-infra/utils/structs/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
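Since the clienttls helper is dropped from vendor above, here is a stdlib-only sketch of what the removed CreateTLSConfig did. createTLSConfig is a hypothetical stand-in, not part of this patch; it swaps coreos/etcd's tlsutil for plain crypto/tls and crypto/x509 while keeping the same fields and defaults.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

// createTLSConfig mirrors the removed clienttls.CreateTLSConfig using
// only the standard library instead of coreos/etcd's tlsutil.
func createTLSConfig(certFile, keyFile, caFile string, skipVerify bool) (*tls.Config, error) {
	cfg := &tls.Config{
		MinVersion:         tls.VersionTLS10, // same floor as the removed helper
		InsecureSkipVerify: skipVerify,
	}
	if certFile != "" && keyFile != "" {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, err
		}
		cfg.Certificates = []tls.Certificate{cert}
	}
	if caFile != "" {
		pem, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(pem)
		cfg.RootCAs = pool
	}
	return cfg, nil
}

func main() {
	cfg, err := createTLSConfig("", "", "", true)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("TLS min version:", cfg.MinVersion)
}
```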
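A quick usage sketch for the once.ReturnError helper added above (import path as introduced by this patch); the initFn closure and the printed outputs are illustrative.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ligato/cn-infra/utils/once"
)

func main() {
	var initOnce once.ReturnError

	attempts := 0
	initFn := func() error {
		attempts++
		return errors.New("init failed")
	}

	// The first Do runs initFn; every later Do skips it and
	// returns the error stored by the first run.
	fmt.Println(initOnce.Do(initFn)) // init failed
	fmt.Println(initOnce.Do(initFn)) // init failed (cached)
	fmt.Println(attempts)            // 1
}
```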
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package structs provides reflection utilities to inspect structures. -package structs diff --git a/vendor/github.com/ligato/cn-infra/utils/structs/structs_reflection.go b/vendor/github.com/ligato/cn-infra/utils/structs/structs_reflection.go deleted file mode 100644 index ccadcc879d..0000000000 --- a/vendor/github.com/ligato/cn-infra/utils/structs/structs_reflection.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package structs - -import ( - "reflect" - "strings" -) - -// FindField compares the pointers (pointerToAField with all fields in pointerToAStruct) -func FindField(pointerToAField interface{}, pointerToAStruct interface{}) (field *reflect.StructField, found bool) { - fieldVal := reflect.ValueOf(pointerToAField) - - if fieldVal.Kind() != reflect.Ptr { - panic("pointerToAField must be a pointer") - } - - strct := reflect.Indirect(reflect.ValueOf(pointerToAStruct)) - numField := strct.NumField() - - for i := 0; i < numField; i++ { - sf := strct.Field(i) - - //logrus.DefaultLogger().Info("xxxxxxxxxxx ", sf.Kind().String(), " ", sf.String()) - - if sf.Kind() == reflect.Ptr || sf.Kind() == reflect.Interface { - if fieldVal.Interface() == sf { - field := strct.Type().Field(i) - return &field, true - } - } else if sf.CanAddr() { - if fieldVal.Pointer() == sf.Addr().Pointer() { - field := strct.Type().Field(i) - return &field, true - } - } - } - - return nil, false -} - -// ListExportedFields returns all fields of a structure that starts wit uppercase letter -func ListExportedFields(val interface{}, predicates ...ExportedPredicate) []*reflect.StructField { - valType := reflect.Indirect(reflect.ValueOf(val)).Type() - len := valType.NumField() - ret := []*reflect.StructField{} - for i := 0; i < len; i++ { - structField := valType.Field(i) - - if FieldExported(&structField, predicates...) 
{ - ret = append(ret, &structField) - } - } - - return ret -} - -// ExportedPredicate defines a callback (used in func FieldExported) -type ExportedPredicate func(field *reflect.StructField) bool - -// FieldExported returns true if field name starts with uppercase -func FieldExported(field *reflect.StructField, predicates ...ExportedPredicate) (exported bool) { - if field.Name[0] == strings.ToUpper(string(field.Name[0]))[0] { - expPredic := true - for _, predicate := range predicates { - if !predicate(field) { - expPredic = false - break - } - } - - return expPredic - } - - return false -} - -// ListExportedFieldsPtrs iterates struct fields and return slice of pointers to field values -func ListExportedFieldsPtrs(val interface{}, predicates ...ExportedPredicate) ( - fields []*reflect.StructField, valPtrs []interface{}) { - - rVal := reflect.Indirect(reflect.ValueOf(val)) - valPtrs = []interface{}{} - fields = []*reflect.StructField{} - - for i := 0; i < rVal.NumField(); i++ { - field := rVal.Field(i) - structField := rVal.Type().Field(i) - if !FieldExported(&structField, predicates...) { - continue - } - - switch field.Kind() { - case reflect.Ptr, reflect.Interface: - if field.IsNil() { - p := reflect.New(field.Type().Elem()) - field.Set(p) - valPtrs = append(valPtrs, p.Interface()) - } else { - valPtrs = append(valPtrs, field.Interface()) - } - case reflect.Slice, reflect.Chan, reflect.Map: - if field.IsNil() { - p := reflect.New(field.Type()) - field.Set(p.Elem()) - valPtrs = append(valPtrs, field.Addr().Interface()) - } else { - valPtrs = append(valPtrs, field.Interface()) - } - default: - if field.CanAddr() { - valPtrs = append(valPtrs, field.Addr().Interface()) - } else if field.IsValid() { - valPtrs = append(valPtrs, field.Interface()) - } else { - panic("invalid field") - } - } - - fields = append(fields, &structField) - } - - return fields, valPtrs -} diff --git a/vendor/github.com/maraino/go-mock/.travis.yml b/vendor/github.com/maraino/go-mock/.travis.yml deleted file mode 100644 index 87e7c9a53a..0000000000 --- a/vendor/github.com/maraino/go-mock/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -install: - - go get ./... 
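The structs helpers removed above reduce to standard reflection. Below is a stdlib-only sketch of the exported-field listing that ListExportedFields performed; the deps type is hypothetical, and reflect's PkgPath test replaces the uppercase-first-letter check used in the deleted FieldExported.

```go
package main

import (
	"fmt"
	"reflect"
)

type deps struct {
	Log    string // exported
	Plugin string // exported
	hidden int    // unexported, skipped
}

func main() {
	// Equivalent of the removed ListExportedFields: a struct field is
	// exported exactly when its PkgPath is empty.
	t := reflect.TypeOf(deps{})
	for i := 0; i < t.NumField(); i++ {
		if f := t.Field(i); f.PkgPath == "" {
			fmt.Println("exported:", f.Name)
		}
	}
}
```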
- - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - go test -v -covermode=count -coverprofile=coverage.out - - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/vendor/github.com/maraino/go-mock/AUTHORS b/vendor/github.com/maraino/go-mock/AUTHORS deleted file mode 100644 index 1f0d0ac8a5..0000000000 --- a/vendor/github.com/maraino/go-mock/AUTHORS +++ /dev/null @@ -1,4 +0,0 @@ -# go-mock - A mock framework for Go (https://github.com/maraino/go-mock) -# is brought to you by: - -Mariano Cano diff --git a/vendor/github.com/maraino/go-mock/LICENSE b/vendor/github.com/maraino/go-mock/LICENSE deleted file mode 100644 index 4a0a9b01d3..0000000000 --- a/vendor/github.com/maraino/go-mock/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Mariano Cano - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/maraino/go-mock/Makefile b/vendor/github.com/maraino/go-mock/Makefile deleted file mode 100644 index 4aaceb4721..0000000000 --- a/vendor/github.com/maraino/go-mock/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PACKAGE=github.com/maraino/go-mock - -all: - go build $(PACKAGE) - -test: - go test -cover $(PACKAGE) - -cover: - go test -coverprofile=c.out $(PACKAGE) - go tool cover -html=c.out diff --git a/vendor/github.com/maraino/go-mock/README.md b/vendor/github.com/maraino/go-mock/README.md deleted file mode 100644 index c510f1a17e..0000000000 --- a/vendor/github.com/maraino/go-mock/README.md +++ /dev/null @@ -1,201 +0,0 @@ -go-mock -======= - -A mocking framework for [Go](http://golang.org/). - -Read online reference at http://godoc.org/github.com/maraino/go-mock - -Status ------- - -[![Build Status](https://travis-ci.org/maraino/go-mock.svg)](https://travis-ci.org/maraino/go-mock) -[![Coverage Status](https://coveralls.io/repos/maraino/go-mock/badge.svg?branch=master&service=github)](https://coveralls.io/github/maraino/go-mock?branch=master) -[![GoDoc](https://godoc.org/github.com/maraino/go-mock?status.svg)](http://godoc.org/github.com/maraino/go-mock) - -Usage ------ - -Let's say that we have an interface like this that we want to Mock. - - type Client interface { - Request(url *url.URL) (int, string, error) - } - - -We need to create a new struct that implements the interface. But we will use -github.com/maraino/go-mock to replace the actual calls with some specific results. 
- - import ( - "github.com/maraino/go-mock" - "net/url" - ) - - type MyClient struct { - mock.Mock - } - - func (c *MyClient) Request(url *url.URL) (int, string, error) { - ret := c.Called(url) - return ret.Int(0), ret.String(1), ret.Error(2) - } - -Then we need to configure the responses for the defined functions: - - c := &MyClient{} - url, _ := url.Parse("http://www.example.org") - c.When("Request", url).Return(200, "{result:1}", nil).Times(1) - -We will execute the function that we have Mocked: - - code, json, err := c.Request(url) - fmt.Printf("Code: %d, JSON: %s, Error: %v\n", code, json, err) - -This will produce the output: - - Code: 200, JSON: {result:1}, Error: - -And finally if we want to verify the number of calls we can use: - - if ok, err := c.Verify(); !ok { - fmt.Println(err) - } - -API Reference -------------- - -### func (m *Mock) When(name string, arguments ...interface{}) *MockFunction - -Creates an stub for a specific function and a list of arguments. - - c.When("FunctionName", argument1, argument2, ...) - -It returns a mock.MockFunction that can be used to configure the behavior and -validations when the method is called - -### func (m *Mock) Reset() *Mock - -Removes all the defined stubs and returns a clean mock. - -### func (m *Mock) Verify() (bool, error) - -Checks all validations and return if true if they are ok, or false and an error -if at least one validation have failed. - -### func (m *Mock) Called(arguments ...interface{}) *MockResult - -Called must be used in the struct that implements the interface that we want to mock. -It's the code that glues that struct with the go-mock package. - -We will need to implement the interface and then use Called with the function arguments -and use the return value to return the values to our mocked struct. - - type Map interface { - Set(key string, value interface{}) - Get(key string) (interface{}, error) - GetString(key string) (string, error) - Load(key string, value interface{}) error - } - - type MyMap struct { - mock.Mock - } - - func (m *MyMap) Set(key string, value interface{}) { - m.Called(key, value) - } - - func (m *MyMap) Get(key string) (interface{}, error) { - ret := m.Called(key) - return ret.Get(0), ret.Error(1) - } - - func (m *MyMap) GetString(key string) (string, error) { - ret := m.Called(key) - return ret.String(0), ret.Error(1) - } - - func (m *MyMap) Load(key string, value interface{}) error { - ret := m.Called(key, value) - return ret.Error(0) - } - -### func (f *MockFunction) Return(v ...interface{}) *MockFunction - -Defines the return parameters of our stub. The use of it is pretty simple, we -can simply chain mock.When with Return to set the return values. - - m.When("Get", "a-test-key").Return("a-test-value", nil) - m.When("GetString", "a-test-key").Return("a-test-value", nil) - m.When("Get", "another-test-key").Return(123, nil) - m.When("Get", mock.Any).Return(nil, errors.New("not-found")) - -If no return values are set, the method will return 0 for numeric types, -false for bools, "" for strings and nil for errors or any other type. - -### func (f *MockFunction) ReturnToArgument(n int, v interface{}) *MockFunction - -Defines a special return parameter to an argument of the function. We can also chain -this method to a When or a Return. 
- - m.When("Load", "a-test-key").ReturnToArgument(1, "a-test-value") - m.When("Load", "another-test-key").Return(nil).ReturnToArgument(1, 123) - -### func (f *MockFunction) Panic(v interface{}) *MockFunction - -Panic will cause a panic when the stub method is called with the specified parameters. - - m.When("Get", "foobar").Panic("internal error") - -### func (f *MockFunction) Times(a int) *MockFunction - -Defines the exact number of times a method should be called. This is validated if mock.Verify -is executed. - - m.When("Get", "a-test-key").Return("a-test-value", nil).Times(1) - -### func (f *MockFunction) AtLeast(a int) *MockFunction - -Defines the minimum number of times a method should be called. This is validated if mock.Verify -is executed. - - m.When("Get", "a-test-key").Return("a-test-value", nil).AtLeast(2) - -### func (f *MockFunction) AtMost(a int) *MockFunction - -Defines the maximum number of times a method should be called. This is validated if mock.Verify -is executed. - - m.When("Get", "a-test-key").Return("a-test-value", nil).AtMost(1) - -### func (f *MockFunction) Between(a, b int) *MockFunction - -Defines a range of times a method should be called. This is validated if mock.Verify -is executed. - - m.When("Get", "a-test-key").Return("a-test-value", nil).Between(2, 5) - -### func (f *MockFunction) Timeout(d time.Duration) *MockFunction - -Defines a timeout to sleep before returning the value of a function. - - m.When("Get", "a-test-key").Return("a-test-value", nil).Timeout(100 * time.Millisecond) - -### func (f *MockFunction) Call(call interface{}) *MockFunction - -Defines a custom function that will be executed instead of the function in the stub. -The return values of the function will be used as the return values for the stub. - - datastore := make(map[string]interface{}) - - m.When("Get", mock.Any).Call(func(key string) (interface{}, error) { - if i, ok := datastore[key]; ok { - return i, nil - } else { - return nil, ErrNotFound - } - }) - - m.When("Set", mock.Any, mock.Any).Call(func(key string, value interface{}) error { - datastore[key] = value - return nil - }) diff --git a/vendor/github.com/maraino/go-mock/mock.go b/vendor/github.com/maraino/go-mock/mock.go deleted file mode 100644 index 8e6b056538..0000000000 --- a/vendor/github.com/maraino/go-mock/mock.go +++ /dev/null @@ -1,683 +0,0 @@ -// Package mock provides a mocking framework for Go. -// -// https://github.com/maraino/go-mock -package mock - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "sync" - "time" - - "github.com/kr/pretty" -) - -// Mock should be embedded in the struct that we want to act as a Mock. -// -// Example: -// type MyClient struct { -// mock.Mock -// } -type Mock struct { - Functions []*MockFunction - inorder bool - order uint - - mutex sync.Mutex -} - -type MockCountCheckType int - -const ( - NONE MockCountCheckType = iota - TIMES - AT_LEAST - AT_MOST - BETWEEN -) - -// MockFunction is the struct used to store the properties of a method stub. -type MockFunction struct { - Name string - Arguments []interface{} - ReturnValues []interface{} - ReturnToArguments []MockReturnToArgument - PanicValue interface{} - count int - countCheck MockCountCheckType - times [2]int - order uint - timeout time.Duration - call reflect.Value -} - -// MockReturnToArgument defines the function arguments used as return parameters. -type MockReturnToArgument struct { - Argument int - Value interface{} -} - -// MockResult struct is used to store the return arguments of a method stub. 
-type MockResult struct { - Result []interface{} -} - -// AnyType defines the type used as a replacement for any kind of argument in the stub configuration. -type AnyType string - -// Any is the constant that should be used to represent a AnyType. -// -// Example: -// mock.When("MyMethod", mock.Any, mock.Any).Return(0) -const ( - Any AnyType = "mock.any" -) - -// AnythingOfType defines the type used as a replacement for an argument of a -// specific type in the stub configuration. -type AnythingOfType string - -// AnyOfType is a helper to define AnythingOfType arguments. -// -// Example: -// mock.When("MyMethod", mock.AnyOfType("int"), mock.AnyOfType("string")).Return(0) -func AnyOfType(t string) AnythingOfType { - return AnythingOfType(t) -} - -// AnyIfType defines the type used as an argument that satisfies a condition. -type AnyIfType func(interface{}) bool - -// AnyIf is a helper to define AnyIfType arguments. -// -// Example: -// f := func(i interface{}) bool { -// ii, ok := i.(MyType) -// return ok && ii.ID = "the-id" -// } -// mock.When("MyMethod", mock.AnyIf(f)).Return(0) -func AnyIf(f func(interface{}) bool) AnyIfType { - return AnyIfType(f) -} - -// RestType indicates there may optionally be one or more remaining elements. -type RestType string - -// Rest indicates there may optionally be one or more remaining elements. -// -// Example: -// mock.When("MyMethod", mock.Slice(123, mock.Rest)).Return(0) -const Rest RestType = "mock.rest" - -func match(actual, expected interface{}) bool { - switch expected := expected.(type) { - case AnyType: - return true - - case AnyIfType: - return expected(actual) - - case AnythingOfType: - return reflect.TypeOf(actual).String() == string(expected) - - default: - if expected == nil { - if actual == nil { - return true - } else { - var v = reflect.ValueOf(actual) - - return v.CanInterface() && v.IsNil() - } - } else { - if reflect.DeepEqual(actual, expected) { - return true - } else { - return reflect.ValueOf(actual) == reflect.ValueOf(expected) - } - } - } - - return false -} - -// Slice is a helper to define AnyIfType arguments for slices and their elements. -// -// Example: -// mock.When("MyMethod", mock.Slice(123, mock.Rest)).Return(0) -func Slice(elements ...interface{}) AnyIfType { - return AnyIf(func(argument interface{}) bool { - var v = reflect.ValueOf(argument) - - if v.Kind() != reflect.Slice { - return false - } - - var el, al = len(elements), v.Len() - - if el == 0 { - return al == 0 - } - - if elements[el-1] == Rest { - el-- - - if al < el { - return false - } - } else if al != el { - return false - } - - for i := 0; i < el; i++ { - if !match(v.Index(i).Interface(), elements[i]) { - return false - } - } - - return true - }) -} - -// Verify verifies the restrictions set in the stubbing. 
-func (m *Mock) Verify() (bool, error) { - for i, f := range m.Functions { - switch f.countCheck { - case TIMES: - if f.count != f.times[1] { - return false, fmt.Errorf("Function #%d %s executed %d times, expected: %d", i+1, f.Name, f.count, f.times[1]) - } - case AT_LEAST: - if f.count < f.times[1] { - return false, fmt.Errorf("Function #%d %s executed %d times, expected at least: %d", i+1, f.Name, f.count, f.times[1]) - } - case AT_MOST: - if f.count > f.times[1] { - return false, fmt.Errorf("Function #%d %s executed %d times, expected at most: %d", i+1, f.Name, f.count, f.times[1]) - } - case BETWEEN: - if f.count < f.times[0] || f.count > f.times[1] { - return false, fmt.Errorf("Function #%d %s executed %d times, expected between: [%d, %d]", i+1, f.Name, f.count, f.times[0], f.times[1]) - } - } - } - return true, nil -} - -// HasVerify is used as the input of VerifyMocks (Mock satisfies it, obviously) -type HasVerify interface { - Verify() (bool, error) -} - -// VerifyMocks verifies a list of mocks, and returns the first error, if any. -func VerifyMocks(mocks ...HasVerify) (bool, error) { - for _, m := range mocks { - if ok, err := m.Verify(); !ok { - return ok, err - } - } - return true, nil -} - -// Used to represent a test we can fail, without importing the testing package -// Importing "testing" in a file not named *_test.go results in tons of test.* flags being added to any compiled binary including this package -type HasError interface { - Error(...interface{}) -} - -// Fail the test if any of the mocks fail verification -func AssertVerifyMocks(t HasError, mocks ...HasVerify) { - if ok, err := VerifyMocks(mocks...); !ok { - t.Error(err) - } -} - -// Reset removes all stubs defined. -func (m *Mock) Reset() *Mock { - defer m.mutex.Unlock() - m.mutex.Lock() - - m.Functions = nil - m.order = 0 - return m -} - -// When defines an stub of one method with some specific arguments. It returns a *MockFunction -// that can be configured with Return, ReturnToArgument, Panic, ... -func (m *Mock) When(name string, arguments ...interface{}) *MockFunction { - defer m.mutex.Unlock() - m.mutex.Lock() - - f := &MockFunction{ - Name: name, - Arguments: arguments, - } - - m.Functions = append(m.Functions, f) - return f -} - -// Called is the function used in the mocks to replace the actual task. -// -// Example: -// func (m *MyClient) Request(url string) (int, string, error) { -// r := m.Called(url) -// return r.Int(0), r.String(1), r.Error(2) -// } -func (m *Mock) Called(arguments ...interface{}) *MockResult { - var timeout time.Duration - defer func() { - m.mutex.Unlock() - if timeout > 0 { - time.Sleep(timeout) - } - }() - m.mutex.Lock() - - pc, _, _, ok := runtime.Caller(1) - if !ok { - panic("Could not get the caller information") - } - - functionPath := runtime.FuncForPC(pc).Name() - parts := strings.Split(functionPath, ".") - functionName := parts[len(parts)-1] - - if f := m.find(functionName, arguments...); f != nil { - // Increase the counter - f.count++ - f.order = m.order - m.order++ - - if f.call.IsValid() { - typ := f.call.Type() - numIn := typ.NumIn() - numArgs := len(arguments) - - // Assign arguments in order. - // Not all of them are strictly required. 
- values := make([]reflect.Value, numIn) - for i := 0; i < numIn; i++ { - if i < numArgs { - values[i] = reflect.ValueOf(arguments[i]) - } else { - values[i] = reflect.Zero(typ.In(i)) - } - } - - if typ.IsVariadic() { - values = f.call.CallSlice(values) - } else { - values = f.call.Call(values) - } - - for i := range values { - f.ReturnValues = append(f.ReturnValues, values[i].Interface()) - } - } - - timeout = f.timeout - - if f.PanicValue != nil { - panic(f.PanicValue) - } - - // Return to arguments - for _, r := range f.ReturnToArguments { - arg := arguments[r.Argument] - argTyp := reflect.TypeOf(arg) - argElem := reflect.ValueOf(arg).Elem() - typ := reflect.TypeOf(r.Value) - if typ.Kind() == reflect.Ptr { - if typ == argTyp { - // *type vs *type - argElem.Set(reflect.ValueOf(r.Value).Elem()) - } else { - // *type vs **type - argElem.Set(reflect.ValueOf(r.Value)) - } - } else { - if typ == argTyp.Elem() { - // type vs *type - argElem.Set(reflect.ValueOf(r.Value)) - } else { - // type vs **type - value := reflect.New(typ).Elem() - value.Set(reflect.ValueOf(r.Value)) - argElem.Set(value.Addr()) - } - } - } - - return &MockResult{f.ReturnValues} - } - - var msg string - if len(arguments) == 0 { - msg = fmt.Sprintf("Mock call missing for %s()", functionName) - } else { - argsStr := pretty.Sprintf("%# v", arguments) - argsStr = argsStr[15 : len(argsStr)-1] - msg = fmt.Sprintf("Mock call missing for %s(%s)", functionName, argsStr) - } - panic(msg) -} - -func (m *Mock) find(name string, arguments ...interface{}) *MockFunction { - var ff *MockFunction - - for _, f := range m.Functions { - if f.Name != name { - continue - } - - if len(f.Arguments) != len(arguments) { - continue - } - - found := true - for i, arg := range f.Arguments { - switch arg.(type) { - case AnyType: - continue - case AnythingOfType: - if string(arg.(AnythingOfType)) == reflect.TypeOf(arguments[i]).String() { - continue - } else { - found = false - } - case AnyIfType: - cond, ok := arg.(AnyIfType) - if ok && cond(arguments[i]) { - continue - } else { - found = false - } - default: - v := reflect.ValueOf(arguments[i]) - if arg == nil && (arguments[i] == nil || (v.CanInterface() && v.IsNil())) { - continue - } - - if reflect.DeepEqual(arg, arguments[i]) || reflect.ValueOf(arg) == reflect.ValueOf(arguments[i]) { - continue - } else { - found = false - } - } - } - - if !found { - continue - } - - // Check if the count check is valid. - // If it's not try to match another function. - if f.isMaxCountCheck() { - if ff == nil { - ff = f - } - continue - } - - return f - } - - return ff -} - -// Return defines the return values of a *MockFunction. -func (f *MockFunction) Return(v ...interface{}) *MockFunction { - f.ReturnValues = append(f.ReturnValues, v...) - return f -} - -// ReturnToArgument defines the values returned to a specific argument of a *MockFunction. -func (f *MockFunction) ReturnToArgument(n int, v interface{}) *MockFunction { - f.ReturnToArguments = append(f.ReturnToArguments, MockReturnToArgument{n, v}) - return f -} - -// Panic defines a panic for a specific *MockFunction. -func (f *MockFunction) Panic(v interface{}) *MockFunction { - f.PanicValue = v - return f -} - -// Times defines how many times a *MockFunction must be called. -// This is verified if mock.Verify is called. -func (f *MockFunction) Times(a int) *MockFunction { - f.countCheck = TIMES - f.times = [2]int{-1, a} - return f -} - -// AtLeast defines the number of times that a *MockFunction must be at least called. 
-// This is verified if mock.Verify is called. -func (f *MockFunction) AtLeast(a int) *MockFunction { - f.countCheck = AT_LEAST - f.times = [2]int{-1, a} - return f -} - -// AtMost defines the number of times that a *MockFunction must be at most called. -// This is verified if mock.Verify is called. -func (f *MockFunction) AtMost(a int) *MockFunction { - f.countCheck = AT_MOST - f.times = [2]int{-1, a} - return f -} - -// Between defines a range of times that a *MockFunction must be called. -// This is verified if mock.Verify is called. -func (f *MockFunction) Between(a, b int) *MockFunction { - f.countCheck = BETWEEN - f.times = [2]int{a, b} - return f -} - -// Timeout defines a timeout to sleep before returning the value of a function. -func (f *MockFunction) Timeout(d time.Duration) *MockFunction { - f.timeout = d - return f -} - -// Call executes a function passed as an argument using the arguments pased to the stub. -// If the function returns any output parameters they will be used as a return arguments -// when the stub is called. If the call argument is not a function it will panic when -// the stub is executed. -// -// Example: -// mock.When("MyMethod", mock.Any, mock.Any).Call(func(a int, b int) int { -// return a+b -// }) -func (f *MockFunction) Call(call interface{}) *MockFunction { - f.call = reflect.ValueOf(call) - return f -} - -// Check if the number of times that a function has been called -// has reach the top range. -func (f *MockFunction) isMaxCountCheck() bool { - switch f.countCheck { - case TIMES: - if f.count >= f.times[1] { - return true - } - case AT_LEAST: - // At least does not have a maximum - return false - case AT_MOST: - if f.count >= f.times[1] { - return true - } - case BETWEEN: - if f.count >= f.times[1] { - return true - } - } - - return false -} - -// Contains returns true if the results have the index i, false otherwise. -func (r *MockResult) Contains(i int) bool { - if len(r.Result) > i { - return true - } else { - return false - } -} - -// Get returns a specific return parameter. -// If a result has not been set, it returns nil, -func (r *MockResult) Get(i int) interface{} { - if r.Contains(i) { - return r.Result[i] - } else { - return nil - } -} - -// GetType returns a specific return parameter with the same type of -// the second argument. A nil version of the type can be casted -// without causing a panic. -func (r *MockResult) GetType(i int, ii interface{}) interface{} { - t := reflect.TypeOf(ii) - if t == nil { - panic(fmt.Sprintf("Could not get type information for %#v", ii)) - } - v := reflect.New(t).Elem() - if r.Contains(i) { - if r.Result[i] != nil { - v.Set(reflect.ValueOf(r.Result[i])) - } - } - return v.Interface() -} - -// Bool returns a specific return parameter as a bool. -// If a result has not been set, it returns false. -func (r *MockResult) Bool(i int) bool { - if r.Contains(i) { - return r.Result[i].(bool) - } else { - return false - } -} - -// Byte returns a specific return parameter as a byte. -// If a result has not been set, it returns 0. -func (r *MockResult) Byte(i int) byte { - if r.Contains(i) { - return r.Result[i].(byte) - } else { - return 0 - } -} - -// Bytes returns a specific return parameter as a []byte. -// If a result has not been set, it returns nil. -func (r *MockResult) Bytes(i int) []byte { - if r.Contains(i) { - if rr := r.Result[i]; rr == nil { - return nil - } else { - return rr.([]byte) - } - } else { - return nil - } -} - -// Error returns a specific return parameter as an error. 
-// If a result has not been set, it returns nil. -func (r *MockResult) Error(i int) error { - if r.Contains(i) && r.Result[i] != nil { - return r.Result[i].(error) - } else { - return nil - } -} - -// Float32 returns a specific return parameter as a float32. -// If a result has not been set, it returns 0. -func (r *MockResult) Float32(i int) float32 { - if r.Contains(i) { - return r.Result[i].(float32) - } else { - return 0 - } -} - -// Float64 returns a specific return parameter as a float64. -// If a result has not been set, it returns 0. -func (r *MockResult) Float64(i int) float64 { - if r.Contains(i) { - return r.Result[i].(float64) - } else { - return 0 - } -} - -// Int returns a specific return parameter as an int. -// If a result has not been set, it returns 0. -func (r *MockResult) Int(i int) int { - if r.Contains(i) { - return r.Result[i].(int) - } else { - return 0 - } -} - -// Int8 returns a specific return parameter as an int8. -// If a result has not been set, it returns 0. -func (r *MockResult) Int8(i int) int8 { - if r.Contains(i) { - return r.Result[i].(int8) - } else { - return 0 - } -} - -// Int16 returns a specific return parameter as an int16. -// If a result has not been set, it returns 0. -func (r *MockResult) Int16(i int) int16 { - if r.Contains(i) { - return r.Result[i].(int16) - } else { - return 0 - } -} - -// Int32 returns a specific return parameter as an int32. -// If a result has not been set, it returns 0. -func (r *MockResult) Int32(i int) int32 { - if r.Contains(i) { - return r.Result[i].(int32) - } else { - return 0 - } -} - -// Int64 returns a specific return parameter as an int64. -// If a result has not been set, it returns 0. -func (r *MockResult) Int64(i int) int64 { - if r.Contains(i) { - return r.Result[i].(int64) - } else { - return 0 - } -} - -// String returns a specific return parameter as a string. -// If a result has not been set, it returns "". -func (r *MockResult) String(i int) string { - if r.Contains(i) { - return r.Result[i].(string) - } else { - return "" - } -} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a51e..0000000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
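For reference, a minimal self-contained sketch of the go-mock flow deleted above: embed mock.Mock, glue methods with Called, stub with When, check with Verify. KV and MockKV are hypothetical types; every mock call used here appears in the removed mock.go.

```go
package main

import (
	"fmt"

	"github.com/maraino/go-mock"
)

// KV is a hypothetical interface we want to mock.
type KV interface {
	Get(key string) (string, error)
}

// MockKV embeds mock.Mock and glues Get to the framework via Called.
type MockKV struct {
	mock.Mock
}

func (m *MockKV) Get(key string) (string, error) {
	ret := m.Called(key)
	return ret.String(0), ret.Error(1)
}

func main() {
	m := &MockKV{}
	m.When("Get", "name").Return("vpp1", nil).Times(1)

	v, err := m.Get("name")
	fmt.Println(v, err) // vpp1 <nil>

	// Verify checks the Times/AtLeast/AtMost constraints set above.
	if ok, err := m.Verify(); !ok {
		fmt.Println(err)
	}
}
```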
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5b3..0000000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 47e1f9ef8e..0000000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,137 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -func dirUnix() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // If that fails, try getent - var stdout bytes.Buffer - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
- if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd = exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - if home == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore deleted file mode 100644 index c2bb6e4af1..0000000000 --- a/vendor/github.com/pierrec/lz4/.gitignore +++ /dev/null @@ -1,31 +0,0 @@ -# Created by https://www.gitignore.io/api/macos - -### macOS ### -*.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -# End of https://www.gitignore.io/api/macos diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml deleted file mode 100644 index 78be21cc82..0000000000 --- a/vendor/github.com/pierrec/lz4/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.x - -script: - - go test -v -cpu=2 - - go test -v -cpu=2 -race \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE deleted file mode 100644 index bd899d8353..0000000000 --- a/vendor/github.com/pierrec/lz4/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
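A short sketch of the go-homedir API removed here, using only the two exported functions shown in the deleted source; the "~/.agent.conf" path is illustrative.

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	// Dir discovers the home directory without cgo ($HOME first, then
	// the getent/shell fallbacks seen in the removed implementation).
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}

	// Expand replaces a leading "~" with that directory.
	cfg, err := homedir.Expand("~/.agent.conf")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(home, cfg)
}
```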
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md deleted file mode 100644 index dd3c9d47e1..0000000000 --- a/vendor/github.com/pierrec/lz4/README.md +++ /dev/null @@ -1,31 +0,0 @@ -[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) -[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) - -# lz4 -LZ4 compression and decompression in pure Go - -## Usage - -```go -import "github.com/pierrec/lz4" -``` - -## Description - -Package lz4 implements reading and writing lz4 compressed data (a frame), -as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, -using an io.Reader (decompression) and io.Writer (compression). -It is designed to minimize memory usage while maximizing throughput by being able to -[de]compress data concurrently. - -The Reader and the Writer support concurrent processing provided the supplied buffers are -large enough (in multiples of BlockMaxSize) and there is no block dependency. -Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. -The runtime.GOMAXPROCS() value is used to apply concurrency or not. - -Although the block level compression and decompression functions are exposed and are fully compatible -with the lz4 block format definition, they are low level and should not be used directly. -For a complete description of an lz4 compressed block, see: -http://fastcompression.blogspot.fr/2011/05/lz4-explained.html - -See https://github.com/Cyan4973/lz4 for the reference C implementation. diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go deleted file mode 100644 index 44e3eaaac7..0000000000 --- a/vendor/github.com/pierrec/lz4/block.go +++ /dev/null @@ -1,454 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "errors" -) - -// block represents a frame data block. -// Used when compressing or decompressing frame blocks concurrently. -type block struct { - compressed bool - zdata []byte // compressed data - data []byte // decompressed data - offset int // offset within the data as with block dependency the 64Kb window is prepended to it - checksum uint32 // compressed data checksum - err error // error while [de]compressing -} - -var ( - // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted. - ErrInvalidSource = errors.New("lz4: invalid source") - // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when - // the supplied buffer for [de]compression is too small. - ErrShortBuffer = errors.New("lz4: short buffer") -) - -// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. -func CompressBlockBound(n int) int { - return n + n/255 + 16 -} - -// UncompressBlock decompresses the source buffer into the destination one, -// starting at the di index and returning the decompressed size. 
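For reference, a hedged sketch of the frame-level usage the lz4 README above recommends over the raw block functions, assuming the vendored version exposes lz4.NewWriter and lz4.NewReader as described in its documentation.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	// Compress into an lz4 frame.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := io.Copy(zw, strings.NewReader("hello hello hello")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil { // flush the final block
		panic(err)
	}

	// Decompress the frame back.
	var out bytes.Buffer
	if _, err := io.Copy(&out, lz4.NewReader(&frame)); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // hello hello hello
}
```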
-// -// The destination buffer must be sized appropriately. -// -// An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte, di int) (int, error) { - si, sn, di0 := 0, len(src), di - if sn == 0 { - return 0, nil - } - - for { - // literals and match lengths (token) - lLen := int(src[si] >> 4) - mLen := int(src[si] & 0xF) - if si++; si == sn { - return di, ErrInvalidSource - } - - // literals - if lLen > 0 { - if lLen == 0xF { - for src[si] == 0xFF { - lLen += 0xFF - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - lLen += int(src[si]) - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - if len(dst)-di < lLen || si+lLen > sn { - return di - di0, ErrShortBuffer - } - di += copy(dst[di:], src[si:si+lLen]) - - if si += lLen; si >= sn { - return di - di0, nil - } - } - - if si += 2; si >= sn { - return di, ErrInvalidSource - } - offset := int(src[si-2]) | int(src[si-1])<<8 - if di-offset < 0 || offset == 0 { - return di - di0, ErrInvalidSource - } - - // match - if mLen == 0xF { - for src[si] == 0xFF { - mLen += 0xFF - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - mLen += int(src[si]) - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - // minimum match length is 4 - mLen += 4 - if len(dst)-di <= mLen { - return di - di0, ErrShortBuffer - } - - // copy the match (NB. match is at least 4 bytes long) - if mLen >= offset { - bytesToCopy := offset * (mLen / offset) - // Efficiently copy the match dst[di-offset:di] into the slice - // dst[di:di+bytesToCopy] - expanded := dst[di-offset : di+bytesToCopy] - n := offset - for n <= bytesToCopy+offset { - copy(expanded[n:], expanded[:n]) - n *= 2 - } - di += bytesToCopy - mLen -= bytesToCopy - } - - di += copy(dst[di:], dst[di-offset:di-offset+mLen]) - } -} - -// CompressBlock compresses the source buffer starting at soffet into the destination one. -// This is the fast version of LZ4 compression and also the default one. -// -// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. -// -// An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, soffset int) (int, error) { - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 || soffset >= sn { - return 0, nil - } - var si, di int - - // fast scan strategy: - // we only need a hash table to store the last sequences (4 bytes) - var hashTable [1 << hashLog]int - var hashShift = uint((minMatch * 8) - hashLog) - - // Initialise the hash table with the first 64Kb of the input buffer - // (used when compressing dependent blocks) - for si < soffset { - h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift - si++ - hashTable[h] = si - } - - anchor := si - fma := 1 << skipStrength - for si < sn-minMatch { - // hash the next 4 bytes (sequence)... 
- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift - // -1 to separate existing entries from new ones - ref := hashTable[h] - 1 - // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving) - hashTable[h] = si + 1 - // no need to check the last 3 bytes in the first literal 4 bytes as - // this guarantees that the next match, if any, is compressed with - // a lower size, since to have some compression we must have: - // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size) - // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap - // and by definition we do have: - // ll >= 1, ml >= 4 - // => ll+ml >= 5 - // => so overlap must be 0 - - // the sequence is new, out of bound (64kb) or not valid: try next sequence - if ref < 0 || fma&(1<>winSizeLog > 0 || - src[ref] != src[si] || - src[ref+1] != src[si+1] || - src[ref+2] != src[si+2] || - src[ref+3] != src[si+3] { - // variable step: improves performance on non-compressible data - si += fma >> skipStrength - fma++ - continue - } - // match found - fma = 1 << skipStrength - lLen := si - anchor - offset := si - ref - - // encode match length part 1 - si += minMatch - mLen := si // match length has minMatch already - for si <= sn && src[si] == src[si-offset] { - si++ - } - mLen = si - mLen - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // encode literals length - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } - } - dst[di] = byte(l) - } - if di++; di == dn { - return di, ErrShortBuffer - } - - // literals - if di+lLen >= dn { - return di, ErrShortBuffer - } - di += copy(dst[di:], src[anchor:anchor+lLen]) - anchor = si - - // encode offset - if di += 2; di >= dn { - return di, ErrShortBuffer - } - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // encode match length part 2 - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } - } - dst[di] = byte(mLen) - if di++; di == dn { - return di, ErrShortBuffer - } - } - } - - if anchor == 0 { - // incompressible - return 0, nil - } - - // last literals - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } - lLen -= 0xF - for ; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } - } - dst[di] = byte(lLen) - } - if di++; di == dn { - return di, ErrShortBuffer - } - - // write literals - src = src[anchor:] - switch n := di + len(src); { - case n > dn: - return di, ErrShortBuffer - case n >= sn: - // incompressible - return 0, nil - } - di += copy(dst[di:], src) - return di, nil -} - -// CompressBlockHC compresses the source buffer starting at soffet into the destination one. -// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. -// -// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. -// -// An error is returned if the destination buffer is too small. 
-func CompressBlockHC(src, dst []byte, soffset int) (int, error) { - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 || soffset >= sn { - return 0, nil - } - var si, di int - - // Hash Chain strategy: - // we need a hash table and a chain table - // the chain table cannot contain more entries than the window size (64Kb entries) - var hashTable [1 << hashLog]int - var chainTable [winSize]int - var hashShift = uint((minMatch * 8) - hashLog) - - // Initialise the hash table with the first 64Kb of the input buffer - // (used when compressing dependent blocks) - for si < soffset { - h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift - chainTable[si&winMask] = hashTable[h] - si++ - hashTable[h] = si - } - - anchor := si - for si < sn-minMatch { - // hash the next 4 bytes (sequence)... - h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift - - // follow the chain until out of window and give the longest match - mLen := 0 - offset := 0 - for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 { - // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length - if src[next+mLen] == src[si+mLen] { - for ml := 0; ; ml++ { - if src[next+ml] != src[si+ml] || si+ml > sn { - // found a longer match, keep its position and length - if mLen < ml && ml >= minMatch { - mLen = ml - offset = si - next - } - break - } - } - } - } - chainTable[si&winMask] = hashTable[h] - hashTable[h] = si + 1 - - // no match found - if mLen == 0 { - si++ - continue - } - - // match found - // update hash/chain tables with overlaping bytes: - // si already hashed, add everything from si+1 up to the match length - for si, ml := si+1, si+mLen; si < ml; { - h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift - chainTable[si&winMask] = hashTable[h] - si++ - hashTable[h] = si - } - - lLen := si - anchor - si += mLen - mLen -= minMatch // match length does not include minMatch - - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // encode literals length - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } - } - dst[di] = byte(l) - } - if di++; di == dn { - return di, ErrShortBuffer - } - - // literals - if di+lLen >= dn { - return di, ErrShortBuffer - } - di += copy(dst[di:], src[anchor:anchor+lLen]) - anchor = si - - // encode offset - if di += 2; di >= dn { - return di, ErrShortBuffer - } - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // encode match length part 2 - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } - } - dst[di] = byte(mLen) - if di++; di == dn { - return di, ErrShortBuffer - } - } - } - - if anchor == 0 { - // incompressible - return 0, nil - } - - // last literals - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } - lLen -= 0xF - for ; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } - } - dst[di] = byte(lLen) - } - if di++; di == dn { - return di, ErrShortBuffer - } - - // write literals - src = src[anchor:] - switch n := di + len(src); { - case n > dn: - return di, ErrShortBuffer - case n >= sn: - // 
incompressible - return 0, nil - } - di += copy(dst[di:], src) - return di, nil -} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go deleted file mode 100644 index ddb82f66f8..0000000000 --- a/vendor/github.com/pierrec/lz4/lz4.go +++ /dev/null @@ -1,105 +0,0 @@ -// Package lz4 implements reading and writing lz4 compressed data (a frame), -// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, -// using an io.Reader (decompression) and io.Writer (compression). -// It is designed to minimize memory usage while maximizing throughput by being able to -// [de]compress data concurrently. -// -// The Reader and the Writer support concurrent processing provided the supplied buffers are -// large enough (in multiples of BlockMaxSize) and there is no block dependency. -// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. -// The runtime.GOMAXPROCS() value is used to apply concurrency or not. -// -// Although the block level compression and decompression functions are exposed and are fully compatible -// with the lz4 block format definition, they are low level and should not be used directly. -// For a complete description of an lz4 compressed block, see: -// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html -// -// See https://github.com/Cyan4973/lz4 for the reference C implementation. -package lz4 - -import ( - "hash" - "sync" - - "github.com/pierrec/xxHash/xxHash32" -) - -const ( - // Extension is the LZ4 frame file name extension - Extension = ".lz4" - // Version is the LZ4 frame format version - Version = 1 - - frameMagic = uint32(0x184D2204) - frameSkipMagic = uint32(0x184D2A50) - - // The following constants are used to setup the compression algorithm. - minMatch = 4 // the minimum size of the match sequence size (4 bytes) - winSizeLog = 16 // LZ4 64Kb window size limit - winSize = 1 << winSizeLog - winMask = winSize - 1 // 64Kb window of previous data for dependent blocks - - // hashLog determines the size of the hash table used to quickly find a previous match position. - // Its value influences the compression speed and memory usage, the lower the faster, - // but at the expense of the compression ratio. - // 16 seems to be the best compromise. - hashLog = 16 - hashTableSize = 1 << hashLog - hashShift = uint((minMatch * 8) - hashLog) - - mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. - skipStrength = 6 // variable step for fast scan - - hasher = uint32(2654435761) // prime number used to hash minMatch -) - -// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. -var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} -var bsMapValue = map[int]byte{} - -// Reversed. -func init() { - for i, v := range bsMapID { - bsMapValue[v] = i - } -} - -// Header describes the various flags that can be set on a Writer or obtained from a Reader. -// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). -// -// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -// It is the caller responsibility to check them if necessary (typically when using the Reader concurrency). 
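As a brief aside, the two block compressors deleted in this file differ only in match-search strategy (fast scan versus hash chain); a hedged sketch comparing them, assuming the pre-removal `github.com/pierrec/lz4` import path and arbitrary sample data:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	src := bytes.Repeat([]byte("abcd1234"), 512)
	dst := make([]byte, len(src))

	if n, err := lz4.CompressBlock(src, dst, 0); err == nil && n > 0 {
		fmt.Println("fast scan: ", n, "bytes") // default strategy
	}
	if n, err := lz4.CompressBlockHC(src, dst, 0); err == nil && n > 0 {
		fmt.Println("hash chain:", n, "bytes") // usually smaller, but slower
	}
}
```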
-type Header struct { - BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one) - BlockChecksum bool // compressed blocks are checksumed - NoChecksum bool // frame checksum - BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. - Size uint64 // the frame total size. It is _not_ computed by the Writer. - HighCompression bool // use high compression (only for the Writer) - done bool // whether the descriptor was processed (Read or Write and checked) - // Removed as not supported - // Dict bool // a dictionary id is to be used - // DictID uint32 // the dictionary id read from the frame, if any. -} - -// xxhPool wraps the standard pool for xxHash items. -// Putting items back in the pool automatically resets them. -type xxhPool struct { - sync.Pool -} - -func (p *xxhPool) Get() hash.Hash32 { - return p.Pool.Get().(hash.Hash32) -} - -func (p *xxhPool) Put(h hash.Hash32) { - h.Reset() - p.Pool.Put(h) -} - -// hashPool is used by readers and writers and contains xxHash items. -var hashPool = xxhPool{ - Pool: sync.Pool{ - New: func() interface{} { return xxHash32.New(0) }, - }, -} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go deleted file mode 100644 index 9f7fd60424..0000000000 --- a/vendor/github.com/pierrec/lz4/reader.go +++ /dev/null @@ -1,364 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "runtime" - "sync" - "sync/atomic" -) - -// ErrInvalid is returned when the data being read is not an LZ4 archive -// (LZ4 magic number detection failed). -var ErrInvalid = errors.New("invalid lz4 data") - -// errEndOfBlock is returned by readBlock when it has reached the last block of the frame. -// It is not an error. -var errEndOfBlock = errors.New("end of block") - -// Reader implements the LZ4 frame decoder. -// The Header is set after the first call to Read(). -// The Header may change between Read() calls in case of concatenated frames. -type Reader struct { - Pos int64 // position within the source - Header - src io.Reader - checksum hash.Hash32 // frame hash - wg sync.WaitGroup // decompressing go routine wait group - data []byte // buffered decompressed data - window []byte // 64Kb decompressed data window -} - -// NewReader returns a new LZ4 frame decoder. -// No access to the underlying io.Reader is performed. -func NewReader(src io.Reader) *Reader { - return &Reader{ - src: src, - checksum: hashPool.Get(), - } -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. 
-func (z *Reader) readHeader(first bool) error { - defer z.checksum.Reset() - - for { - var magic uint32 - if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil { - if !first && err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - z.Pos += 4 - if magic>>8 == frameSkipMagic>>8 { - var skipSize uint32 - if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil { - return err - } - z.Pos += 4 - m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) - z.Pos += m - if err != nil { - return err - } - continue - } - if magic != frameMagic { - return ErrInvalid - } - break - } - - // header - var buf [8]byte - if _, err := io.ReadFull(z.src, buf[:2]); err != nil { - return err - } - z.Pos += 2 - - b := buf[0] - if b>>6 != Version { - return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version) - } - z.BlockDependency = b>>5&1 == 0 - z.BlockChecksum = b>>4&1 > 0 - frameSize := b>>3&1 > 0 - z.NoChecksum = b>>2&1 == 0 - // z.Dict = b&1 > 0 - - bmsID := buf[1] >> 4 & 0x7 - bSize, ok := bsMapID[bmsID] - if !ok { - return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID) - } - z.BlockMaxSize = bSize - - z.checksum.Write(buf[0:2]) - - if frameSize { - if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil { - return err - } - z.Pos += 8 - binary.LittleEndian.PutUint64(buf[:], z.Size) - z.checksum.Write(buf[0:8]) - } - - // if z.Dict { - // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil { - // return err - // } - // z.Pos += 4 - // binary.LittleEndian.PutUint32(buf[:], z.DictID) - // z.checksum.Write(buf[0:4]) - // } - - // header checksum - if _, err := io.ReadFull(z.src, buf[:1]); err != nil { - return err - } - z.Pos++ - if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { - return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h) - } - - z.Header.done = true - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. -// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. -// -// Data is buffered if the input buffer is too small, and exhausted upon successive calls. -// -// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is -// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value. -func (z *Reader) Read(buf []byte) (n int, err error) { - if !z.Header.done { - if err = z.readHeader(true); err != nil { - return - } - } - - if len(buf) == 0 { - return - } - - // exhaust remaining data from previous Read() - if len(z.data) > 0 { - n = copy(buf, z.data) - z.data = z.data[n:] - if len(z.data) == 0 { - z.data = nil - } - return - } - - // Break up the input buffer into BlockMaxSize blocks with at least one block. - // Then decompress into each of them concurrently if possible (no dependency). - // In case of dependency, the first block will be missing the window (except on the - // very first call), the rest will have it already since it comes from the previous block. 
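For reference, a minimal sketch of how the frame decoder above is normally driven; the file name is hypothetical, and `io.Copy` picks up the Reader's `WriteTo`, and with it the concurrent decompression path these comments describe:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	f, err := os.Open("data.lz4") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	// The frame header is parsed lazily on the first read.
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}
```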
- wbuf := buf - zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize - zblocks := make([]block, zn) - for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ { - zb := &zblocks[zi] - // last block may be too small - if len(wbuf) < z.BlockMaxSize+len(z.window) { - wbuf = make([]byte, z.BlockMaxSize+len(z.window)) - } - copy(wbuf, z.window) - if zb.err = z.readBlock(wbuf, zb); zb.err != nil { - break - } - wbuf = wbuf[z.BlockMaxSize:] - if !z.BlockDependency { - z.wg.Add(1) - go z.decompressBlock(zb, &abort) - continue - } - // cannot decompress concurrently when dealing with block dependency - z.decompressBlock(zb, nil) - // the last block may not contain enough data - if len(z.window) == 0 { - z.window = make([]byte, winSize) - } - if len(zb.data) >= winSize { - copy(z.window, zb.data[len(zb.data)-winSize:]) - } else { - copy(z.window, z.window[len(zb.data):]) - copy(z.window[len(zb.data)+1:], zb.data) - } - } - z.wg.Wait() - - // since a block size may be less then BlockMaxSize, trim the decompressed buffers - for _, zb := range zblocks { - if zb.err != nil { - if zb.err == errEndOfBlock { - return n, z.close() - } - return n, zb.err - } - bLen := len(zb.data) - if !z.NoChecksum { - z.checksum.Write(zb.data) - } - m := copy(buf[n:], zb.data) - // buffer the remaining data (this is necessarily the last block) - if m < bLen { - z.data = zb.data[m:] - } - n += m - } - - return -} - -// readBlock reads an entire frame block from the frame. -// The input buffer is the one that will receive the decompressed data. -// If the end of the frame is detected, it returns the errEndOfBlock error. -func (z *Reader) readBlock(buf []byte, b *block) error { - var bLen uint32 - if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil { - return err - } - atomic.AddInt64(&z.Pos, 4) - - switch { - case bLen == 0: - return errEndOfBlock - case bLen&(1<<31) == 0: - b.compressed = true - b.data = buf - b.zdata = make([]byte, bLen) - default: - bLen = bLen & (1<<31 - 1) - if int(bLen) > len(buf) { - return fmt.Errorf("lz4.Read: invalid block size: %d", bLen) - } - b.data = buf[:bLen] - b.zdata = buf[:bLen] - } - if _, err := io.ReadFull(z.src, b.zdata); err != nil { - return err - } - - if z.BlockChecksum { - if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil { - return err - } - xxh := hashPool.Get() - defer hashPool.Put(xxh) - xxh.Write(b.zdata) - if h := xxh.Sum32(); h != b.checksum { - return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum) - } - } - - return nil -} - -// decompressBlock decompresses a frame block. -// In case of an error, the block err is set with it and abort is set to 1. -func (z *Reader) decompressBlock(b *block, abort *uint32) { - if abort != nil { - defer z.wg.Done() - } - if b.compressed { - n := len(z.window) - m, err := UncompressBlock(b.zdata, b.data, n) - if err != nil { - if abort != nil { - atomic.StoreUint32(abort, 1) - } - b.err = err - return - } - b.data = b.data[n : n+m] - } - atomic.AddInt64(&z.Pos, int64(len(b.data))) -} - -// close validates the frame checksum (if any) and checks the next frame (if any). 
-func (z *Reader) close() error { - if !z.NoChecksum { - var checksum uint32 - if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil { - return err - } - if checksum != z.checksum.Sum32() { - return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum) - } - } - - // get ready for the next concatenated frame, but do not change the position - pos := z.Pos - z.Reset(z.src) - z.Pos = pos - - // since multiple frames can be concatenated, check for another one - return z.readHeader(false) -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *Reader) Reset(r io.Reader) { - z.Header = Header{} - z.Pos = 0 - z.src = r - z.checksum.Reset() - z.data = nil - z.window = nil -} - -// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer. -// Returns the number of bytes written. -func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { - cpus := runtime.GOMAXPROCS(0) - var buf []byte - - // The initial buffer being nil, the first Read will be only read the compressed frame options. - // The buffer can then be sized appropriately to support maximum concurrency decompression. - // If multiple frames are concatenated, Read() will return with no data decompressed but with - // potentially changed options. The buffer will be resized accordingly, always trying to - // maximize concurrency. - for { - nsize := 0 - // the block max size can change if multiple streams are concatenated. - // Check it after every Read(). - if z.BlockDependency { - // in case of dependency, we cannot decompress concurrently, - // so allocate the minimum buffer + window size - nsize = len(z.window) + z.BlockMaxSize - } else { - // if no dependency, allocate a buffer large enough for concurrent decompression - nsize = cpus * z.BlockMaxSize - } - if nsize != len(buf) { - buf = make([]byte, nsize) - } - - m, er := z.Read(buf) - if er != nil && er != io.EOF { - return n, er - } - m, err = w.Write(buf[:m]) - n += int64(m) - if err != nil || er == io.EOF { - return - } - } -} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go deleted file mode 100644 index b1b712fe21..0000000000 --- a/vendor/github.com/pierrec/lz4/writer.go +++ /dev/null @@ -1,377 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "hash" - "io" - "runtime" -) - -// Writer implements the LZ4 frame encoder. -type Writer struct { - Header - dst io.Writer - checksum hash.Hash32 // frame checksum - data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with - window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer - - zbCompressBuf []byte // buffer for compressing lz4 blocks - writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock -} - -// NewWriter returns a new LZ4 frame encoder. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. -// It is ok to change it before the first Write but then not until a Reset() is performed. 
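A short usage sketch for the frame encoder declared above, assuming the pre-removal API; the output file name and the chosen Header options are illustrative only:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	out, err := os.Create("data.lz4") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := lz4.NewWriter(out)
	// Header options may be set before the first Write, per the comment above.
	zw.Header.BlockChecksum = true
	zw.Header.HighCompression = true

	if _, err := io.Copy(zw, os.Stdin); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil { // writes the end mark and frame checksum
		log.Fatal(err)
	}
}
```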
-func NewWriter(dst io.Writer) *Writer { - return &Writer{ - dst: dst, - checksum: hashPool.Get(), - Header: Header{ - BlockMaxSize: 4 << 20, - }, - writeSizeBuf: make([]byte, 4), - } -} - -// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. -func (z *Writer) writeHeader() error { - // Default to 4Mb if BlockMaxSize is not set - if z.Header.BlockMaxSize == 0 { - z.Header.BlockMaxSize = 4 << 20 - } - // the only option that need to be validated - bSize, ok := bsMapValue[z.Header.BlockMaxSize] - if !ok { - return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize) - } - - // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes - // Size and DictID are optional - var buf [19]byte - - // set the fixed size data: magic number, block max size and flags - binary.LittleEndian.PutUint32(buf[0:], frameMagic) - flg := byte(Version << 6) - if !z.Header.BlockDependency { - flg |= 1 << 5 - } - if z.Header.BlockChecksum { - flg |= 1 << 4 - } - if z.Header.Size > 0 { - flg |= 1 << 3 - } - if !z.Header.NoChecksum { - flg |= 1 << 2 - } - // if z.Header.Dict { - // flg |= 1 - // } - buf[4] = flg - buf[5] = bSize << 4 - - // current buffer size: magic(4) + flags(1) + block max size (1) - n := 6 - // optional items - if z.Header.Size > 0 { - binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) - n += 8 - } - // if z.Header.Dict { - // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID) - // n += 4 - // } - - // header checksum includes the flags, block max size and optional Size and DictID - z.checksum.Write(buf[4:n]) - buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF) - z.checksum.Reset() - - // header ready, write it out - if _, err := z.dst.Write(buf[0 : n+1]); err != nil { - return err - } - z.Header.done = true - - // initialize buffers dependent on header info - z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize) - - return nil -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. -// Write does not return until the data has been written. -// -// If the input buffer is large enough (typically in multiples of BlockMaxSize) -// the data will be compressed concurrently. -// -// Write never buffers any data unless in BlockDependency mode where it may -// do so until it has 64Kb of data, after which it never buffers any. -func (z *Writer) Write(buf []byte) (n int, err error) { - if !z.Header.done { - if err = z.writeHeader(); err != nil { - return - } - } - - if len(buf) == 0 { - return - } - - if !z.NoChecksum { - z.checksum.Write(buf) - } - - // with block dependency, require at least 64Kb of data to work with - // not having 64Kb only matters initially to setup the first window - bl := 0 - if z.BlockDependency && len(z.window) == 0 { - bl = len(z.data) - z.data = append(z.data, buf...) - if len(z.data) < winSize { - return len(buf), nil - } - buf = z.data - z.data = nil - } - - // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block. - // Then compress into each of them concurrently if possible (no dependency). - var ( - zb block - wbuf = buf - zn = len(wbuf) / z.BlockMaxSize - zi = 0 - leftover = len(buf) % z.BlockMaxSize - ) - -loop: - for zi < zn { - if z.BlockDependency { - if zi == 0 { - // first block does not have the window - zb.data = append(z.window, wbuf[:z.BlockMaxSize]...) 
- zb.offset = len(z.window) - wbuf = wbuf[z.BlockMaxSize-winSize:] - } else { - // set the uncompressed data including the window from previous block - zb.data = wbuf[:z.BlockMaxSize+winSize] - zb.offset = winSize - wbuf = wbuf[z.BlockMaxSize:] - } - } else { - zb.data = wbuf[:z.BlockMaxSize] - wbuf = wbuf[z.BlockMaxSize:] - } - - goto write - } - - // left over - if leftover > 0 { - zb = block{data: wbuf} - if z.BlockDependency { - if zn == 0 { - zb.data = append(z.window, zb.data...) - zb.offset = len(z.window) - } else { - zb.offset = winSize - } - } - - leftover = 0 - goto write - } - - if z.BlockDependency { - if len(z.window) == 0 { - z.window = make([]byte, winSize) - } - // last buffer may be shorter than the window - if len(buf) >= winSize { - copy(z.window, buf[len(buf)-winSize:]) - } else { - copy(z.window, z.window[len(buf):]) - copy(z.window[len(buf)+1:], buf) - } - } - - return - -write: - zb = z.compressBlock(zb) - _, err = z.writeBlock(zb) - - written := len(zb.data) - if bl > 0 { - if written >= bl { - written -= bl - bl = 0 - } else { - bl -= written - written = 0 - } - } - - n += written - // remove the window in zb.data - if z.BlockDependency { - if zi == 0 { - n -= len(z.window) - } else { - n -= winSize - } - } - if err != nil { - return - } - zi++ - goto loop -} - -// compressBlock compresses a block. -func (z *Writer) compressBlock(zb block) block { - // compressed block size cannot exceed the input's - var ( - n int - err error - zbuf = z.zbCompressBuf - ) - if z.HighCompression { - n, err = CompressBlockHC(zb.data, zbuf, zb.offset) - } else { - n, err = CompressBlock(zb.data, zbuf, zb.offset) - } - - // compressible and compressed size smaller than decompressed: ok! - if err == nil && n > 0 && len(zb.zdata) < len(zb.data) { - zb.compressed = true - zb.zdata = zbuf[:n] - } else { - zb.compressed = false - zb.zdata = zb.data[zb.offset:] - } - - if z.BlockChecksum { - xxh := hashPool.Get() - xxh.Write(zb.zdata) - zb.checksum = xxh.Sum32() - hashPool.Put(xxh) - } - - return zb -} - -// writeBlock writes a frame block to the underlying io.Writer (size, data). -func (z *Writer) writeBlock(zb block) (int, error) { - bLen := uint32(len(zb.zdata)) - if !zb.compressed { - bLen |= 1 << 31 - } - - n := 0 - - binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen) - n, err := z.dst.Write(z.writeSizeBuf) - if err != nil { - return n, err - } - - m, err := z.dst.Write(zb.zdata) - n += m - if err != nil { - return n, err - } - - if z.BlockChecksum { - binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum) - m, err := z.dst.Write(z.writeSizeBuf) - n += m - - if err != nil { - return n, err - } - } - - return n, nil -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -// -// Flush is only required when in BlockDependency mode and the total of -// data written is less than 64Kb. -func (z *Writer) Flush() error { - if len(z.data) == 0 { - return nil - } - - zb := z.compressBlock(block{data: z.data}) - if _, err := z.writeBlock(zb); err != nil { - return err - } - return nil -} - -// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. 
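To illustrate the Flush semantics documented above, a hedged sketch of the block-dependency case, where a write smaller than the 64Kb window stays buffered until it is explicitly flushed:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	var buf bytes.Buffer
	zw := lz4.NewWriter(&buf)
	zw.Header.BlockDependency = true

	// With block dependency, small writes are buffered internally
	// until 64Kb is available, so Flush is needed to push them out.
	if _, err := zw.Write([]byte("short payload")); err != nil {
		fmt.Println(err)
		return
	}
	if err := zw.Flush(); err != nil {
		fmt.Println(err)
		return
	}
	if err := zw.Close(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(buf.Len(), "bytes of frame data")
}
```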
-func (z *Writer) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - - // buffered data for the block dependency window - if z.BlockDependency && len(z.data) > 0 { - zb := block{data: z.data} - if _, err := z.writeBlock(z.compressBlock(zb)); err != nil { - return err - } - } - - if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil { - return err - } - if !z.NoChecksum { - if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil { - return err - } - } - return nil -} - -// Reset clears the state of the Writer z such that it is equivalent to its -// initial state from NewWriter, but instead writing to w. -// No access to the underlying io.Writer is performed. -func (z *Writer) Reset(w io.Writer) { - z.Header = Header{} - z.dst = w - z.checksum.Reset() - z.data = nil - z.window = nil -} - -// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer. -// Returns the number of bytes read. -// It does not close the Writer. -func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) { - cpus := runtime.GOMAXPROCS(0) - buf := make([]byte, cpus*z.BlockMaxSize) - for { - m, er := io.ReadFull(r, buf) - n += int64(m) - if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF { - if _, err = z.Write(buf[:m]); err != nil { - return - } - if er == nil { - continue - } - return - } - return n, er - } -} diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE deleted file mode 100644 index c1418f3f67..0000000000 --- a/vendor/github.com/pierrec/xxHash/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2014, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go deleted file mode 100644 index 411504e4bb..0000000000 --- a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go +++ /dev/null @@ -1,205 +0,0 @@ -// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version). 
-// (https://github.com/Cyan4973/xxHash/) -package xxHash32 - -import "hash" - -const ( - prime32_1 = 2654435761 - prime32_2 = 2246822519 - prime32_3 = 3266489917 - prime32_4 = 668265263 - prime32_5 = 374761393 -) - -type xxHash struct { - seed uint32 - v1 uint32 - v2 uint32 - v3 uint32 - v4 uint32 - totalLen uint64 - buf [16]byte - bufused int -} - -// New returns a new Hash32 instance. -func New(seed uint32) hash.Hash32 { - xxh := &xxHash{seed: seed} - xxh.Reset() - return xxh -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xxh xxHash) Sum(b []byte) []byte { - h32 := xxh.Sum32() - return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) -} - -// Reset resets the Hash to its initial state. -func (xxh *xxHash) Reset() { - xxh.v1 = xxh.seed + prime32_1 + prime32_2 - xxh.v2 = xxh.seed + prime32_2 - xxh.v3 = xxh.seed - xxh.v4 = xxh.seed - prime32_1 - xxh.totalLen = 0 - xxh.bufused = 0 -} - -// Size returns the number of bytes returned by Sum(). -func (xxh *xxHash) Size() int { - return 4 -} - -// BlockSize gives the minimum number of bytes accepted by Write(). -func (xxh *xxHash) BlockSize() int { - return 1 -} - -// Write adds input bytes to the Hash. -// It never returns an error. -func (xxh *xxHash) Write(input []byte) (int, error) { - n := len(input) - m := xxh.bufused - - xxh.totalLen += uint64(n) - - r := len(xxh.buf) - m - if n < r { - copy(xxh.buf[m:], input) - xxh.bufused += len(input) - return n, nil - } - - p := 0 - if m > 0 { - // some data left from previous update - copy(xxh.buf[xxh.bufused:], input[:r]) - xxh.bufused += len(input) - r - - // fast rotl(13) - p32 := xxh.v1 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v1 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v2 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v2 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v3 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v3 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v4 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v4 = (p32<<13 | p32>>19) * prime32_1 - - p = r - xxh.bufused = 0 - } - - for n := n - 16; p <= n; { - p32 := xxh.v1 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v1 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v2 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v2 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v3 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v3 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v4 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v4 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - } - - copy(xxh.buf[xxh.bufused:], input[p:]) - xxh.bufused += len(input) - p - - return n, nil -} - -// Sum32 returns the 32 bits Hash value. 
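A minimal sketch of the two ways this package is typically used: streaming through the `hash.Hash32` implementation above, and the one-shot `Checksum` helper defined later in this file. The seed value is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/pierrec/xxHash/xxHash32"
)

func main() {
	// Streaming: any number of Writes, then Sum32.
	h := xxHash32.New(0) // seed 0 is arbitrary
	h.Write([]byte("hello "))
	h.Write([]byte("world"))
	fmt.Printf("%08x\n", h.Sum32())

	// One-shot convenience over the same algorithm.
	fmt.Printf("%08x\n", xxHash32.Checksum([]byte("hello world"), 0))
}
```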
-func (xxh *xxHash) Sum32() uint32 { - h32 := uint32(xxh.totalLen) - if xxh.totalLen >= 16 { - h32 += ((xxh.v1 << 1) | (xxh.v1 >> 31)) + - ((xxh.v2 << 7) | (xxh.v2 >> 25)) + - ((xxh.v3 << 12) | (xxh.v3 >> 20)) + - ((xxh.v4 << 18) | (xxh.v4 >> 14)) - } else { - h32 += xxh.seed + prime32_5 - } - - p := 0 - n := xxh.bufused - for n := n - 4; p <= n; p += 4 { - h32 += (uint32(xxh.buf[p+3])<<24 | uint32(xxh.buf[p+2])<<16 | uint32(xxh.buf[p+1])<<8 | uint32(xxh.buf[p])) * prime32_3 - h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 - } - for ; p < n; p++ { - h32 += uint32(xxh.buf[p]) * prime32_5 - h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 - } - - h32 ^= h32 >> 15 - h32 *= prime32_2 - h32 ^= h32 >> 13 - h32 *= prime32_3 - h32 ^= h32 >> 16 - - return h32 -} - -// Checksum returns the 32bits Hash value. -func Checksum(input []byte, seed uint32) uint32 { - n := len(input) - h32 := uint32(n) - - if n < 16 { - h32 += seed + prime32_5 - } else { - v1 := seed + prime32_1 + prime32_2 - v2 := seed + prime32_2 - v3 := seed - v4 := seed - prime32_1 - p := 0 - for p <= n-16 { - v1 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v1 = (v1<<13 | v1>>19) * prime32_1 - p += 4 - v2 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v2 = (v2<<13 | v2>>19) * prime32_1 - p += 4 - v3 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v3 = (v3<<13 | v3>>19) * prime32_1 - p += 4 - v4 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v4 = (v4<<13 | v4>>19) * prime32_1 - p += 4 - } - input = input[p:] - n -= p - h32 += ((v1 << 1) | (v1 >> 31)) + - ((v2 << 7) | (v2 >> 25)) + - ((v3 << 12) | (v3 >> 20)) + - ((v4 << 18) | (v4 >> 14)) - } - - p := 0 - for p <= n-4 { - h32 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_3 - h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 - p += 4 - } - for p < n { - h32 += uint32(input[p]) * prime32_5 - h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 - p++ - } - - h32 ^= h32 >> 15 - h32 *= prime32_2 - h32 ^= h32 >> 13 - h32 *= prime32_3 - h32 ^= h32 >> 16 - - return h32 -} diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore deleted file mode 100644 index 83c8f82374..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -*.[68] -*.a -*.out -*.swp -_obj -_testmain.go -cmd/metrics-bench/metrics-bench -cmd/metrics-example/metrics-example -cmd/never-read/never-read diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml deleted file mode 100644 index f8b3b2ed28..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - -script: - - ./validate.sh - -# this should give us faster builds according to -# http://docs.travis-ci.com/user/migrating-from-legacy/ -sudo: false diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE deleted file mode 100644 index 363fa9ee77..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright 2012 Richard Crowley. All rights reserved. 
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    1. Redistributions of source code must retain the above copyright
-       notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-       copyright notice, this list of conditions and the following
-       disclaimer in the documentation and/or other materials provided
-       with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
-THE POSSIBILITY OF SUCH DAMAGE.
-
-The views and conclusions contained in the software and documentation
-are those of the authors and should not be interpreted as representing
-official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
deleted file mode 100644
index bc2a45a838..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ /dev/null
@@ -1,166 +0,0 @@
-go-metrics
-==========
-
-![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
-
-Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
-
-Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
-
-Usage
------
-
-Create and update metrics:
-
-```go
-c := metrics.NewCounter()
-metrics.Register("foo", c)
-c.Inc(47)
-
-g := metrics.NewGauge()
-metrics.Register("bar", g)
-g.Update(47)
-
-r := NewRegistry()
-g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
-
-s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
-h := metrics.NewHistogram(s)
-metrics.Register("baz", h)
-h.Update(47)
-
-m := metrics.NewMeter()
-metrics.Register("quux", m)
-m.Mark(47)
-
-t := metrics.NewTimer()
-metrics.Register("bang", t)
-t.Time(func() {})
-t.Update(47)
-```
-
-Register() is not threadsafe.
For threadsafe metric registration use -GetOrRegister: - -```go -t := metrics.GetOrRegisterTimer("account.create.latency", nil) -t.Time(func() {}) -t.Update(47) -``` - -**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will -leak memory: - -```go -// Will call Stop() on the Meter to allow for garbage collection -metrics.Unregister("quux") -// Or similarly for a Timer that embeds a Meter -metrics.Unregister("bang") -``` - -Periodically log every metric in human-readable form to standard error: - -```go -go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) -``` - -Periodically log every metric in slightly-more-parseable form to syslog: - -```go -w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") -go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) -``` - -Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite): - -```go - -import "github.com/cyberdelia/go-metrics-graphite" - -addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") -go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) -``` - -Periodically emit every metric into InfluxDB: - -**NOTE:** this has been pulled out of the library due to constant fluctuations -in the InfluxDB API. In fact, all client libraries are on their way out. see -issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and -[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details. - -```go -import "github.com/vrischmann/go-metrics-influxdb" - -go influxdb.InfluxDB(metrics.DefaultRegistry, - 10e9, - "127.0.0.1:8086", - "database-name", - "username", - "password" -) -``` - -Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): - -**Note**: the client included with this repository under the `librato` package -has been deprecated and moved to the repository linked above. - -```go -import "github.com/mihasya/go-metrics-librato" - -go librato.Librato(metrics.DefaultRegistry, - 10e9, // interval - "example@example.com", // account owner email address - "token", // Librato API token - "hostname", // source - []float64{0.95}, // percentiles to send - time.Millisecond, // time unit -) -``` - -Periodically emit every metric to StatHat: - -```go -import "github.com/rcrowley/go-metrics/stathat" - -go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") -``` - -Maintain all metrics along with expvars at `/debug/metrics`: - -This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) -but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars -as well as all your go-metrics. 
- - -```go -import "github.com/rcrowley/go-metrics/exp" - -exp.Exp(metrics.DefaultRegistry) -``` - -Installation ------------- - -```sh -go get github.com/rcrowley/go-metrics -``` - -StatHat support additionally requires their Go client: - -```sh -go get github.com/stathat/go -``` - -Publishing Metrics ------------------- - -Clients are available for the following destinations: - -* Librato - https://github.com/mihasya/go-metrics-librato -* Graphite - https://github.com/cyberdelia/go-metrics-graphite -* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb -* Ganglia - https://github.com/appscode/metlia -* Prometheus - https://github.com/deathowl/go-metrics-prometheus -* DataDog - https://github.com/syntaqx/go-metrics-datadog -* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go deleted file mode 100644 index bb7b039cb5..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/counter.go +++ /dev/null @@ -1,112 +0,0 @@ -package metrics - -import "sync/atomic" - -// Counters hold an int64 value that can be incremented and decremented. -type Counter interface { - Clear() - Count() int64 - Dec(int64) - Inc(int64) - Snapshot() Counter -} - -// GetOrRegisterCounter returns an existing Counter or constructs and registers -// a new StandardCounter. -func GetOrRegisterCounter(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounter).(Counter) -} - -// NewCounter constructs a new StandardCounter. -func NewCounter() Counter { - if UseNilMetrics { - return NilCounter{} - } - return &StandardCounter{0} -} - -// NewRegisteredCounter constructs and registers a new StandardCounter. -func NewRegisteredCounter(name string, r Registry) Counter { - c := NewCounter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// CounterSnapshot is a read-only copy of another Counter. -type CounterSnapshot int64 - -// Clear panics. -func (CounterSnapshot) Clear() { - panic("Clear called on a CounterSnapshot") -} - -// Count returns the count at the time the snapshot was taken. -func (c CounterSnapshot) Count() int64 { return int64(c) } - -// Dec panics. -func (CounterSnapshot) Dec(int64) { - panic("Dec called on a CounterSnapshot") -} - -// Inc panics. -func (CounterSnapshot) Inc(int64) { - panic("Inc called on a CounterSnapshot") -} - -// Snapshot returns the snapshot. -func (c CounterSnapshot) Snapshot() Counter { return c } - -// NilCounter is a no-op Counter. -type NilCounter struct{} - -// Clear is a no-op. -func (NilCounter) Clear() {} - -// Count is a no-op. -func (NilCounter) Count() int64 { return 0 } - -// Dec is a no-op. -func (NilCounter) Dec(i int64) {} - -// Inc is a no-op. -func (NilCounter) Inc(i int64) {} - -// Snapshot is a no-op. -func (NilCounter) Snapshot() Counter { return NilCounter{} } - -// StandardCounter is the standard implementation of a Counter and uses the -// sync/atomic package to manage a single int64 value. -type StandardCounter struct { - count int64 -} - -// Clear sets the counter to zero. -func (c *StandardCounter) Clear() { - atomic.StoreInt64(&c.count, 0) -} - -// Count returns the current count. -func (c *StandardCounter) Count() int64 { - return atomic.LoadInt64(&c.count) -} - -// Dec decrements the counter by the given amount. -func (c *StandardCounter) Dec(i int64) { - atomic.AddInt64(&c.count, -i) -} - -// Inc increments the counter by the given amount. 
-func (c *StandardCounter) Inc(i int64) { - atomic.AddInt64(&c.count, i) -} - -// Snapshot returns a read-only copy of the counter. -func (c *StandardCounter) Snapshot() Counter { - return CounterSnapshot(c.Count()) -} diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go deleted file mode 100644 index 043ccefab6..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package metrics - -import ( - "runtime/debug" - "time" -) - -var ( - debugMetrics struct { - GCStats struct { - LastGC Gauge - NumGC Gauge - Pause Histogram - //PauseQuantiles Histogram - PauseTotal Gauge - } - ReadGCStats Timer - } - gcStats debug.GCStats -) - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. -func CaptureDebugGCStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureDebugGCStatsOnce(r) - } -} - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. -// -// Be careful (but much less so) with this because debug.ReadGCStats calls -// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world -// operation, isn't something you want to be doing all the time. -func CaptureDebugGCStatsOnce(r Registry) { - lastGC := gcStats.LastGC - t := time.Now() - debug.ReadGCStats(&gcStats) - debugMetrics.ReadGCStats.UpdateSince(t) - - debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) - debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) - if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { - debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) - } - //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) - debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) -} - -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. -func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() - - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) -} - -// Allocate an initial slice for gcStats.Pause to avoid allocations during -// normal operation. 
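For reference, a small sketch wiring together the debug GC-stats helpers deleted above; the sampling interval and the use of a fresh registry are assumptions:

```go
package main

import (
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	// Register the debug.GCStats gauges and histogram named above,
	// then sample them periodically in a background goroutine.
	metrics.RegisterDebugGCStats(r)
	go metrics.CaptureDebugGCStats(r, 5*time.Second)

	time.Sleep(time.Second) // stand-in for real application work
}
```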
-func init() { - gcStats.Pause = make([]time.Duration, 11) -} diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go deleted file mode 100644 index 694a1d0330..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/ewma.go +++ /dev/null @@ -1,118 +0,0 @@ -package metrics - -import ( - "math" - "sync" - "sync/atomic" -) - -// EWMAs continuously calculate an exponentially-weighted moving average -// based on an outside source of clock ticks. -type EWMA interface { - Rate() float64 - Snapshot() EWMA - Tick() - Update(int64) -} - -// NewEWMA constructs a new EWMA with the given alpha. -func NewEWMA(alpha float64) EWMA { - if UseNilMetrics { - return NilEWMA{} - } - return &StandardEWMA{alpha: alpha} -} - -// NewEWMA1 constructs a new EWMA for a one-minute moving average. -func NewEWMA1() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/1)) -} - -// NewEWMA5 constructs a new EWMA for a five-minute moving average. -func NewEWMA5() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/5)) -} - -// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. -func NewEWMA15() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/15)) -} - -// EWMASnapshot is a read-only copy of another EWMA. -type EWMASnapshot float64 - -// Rate returns the rate of events per second at the time the snapshot was -// taken. -func (a EWMASnapshot) Rate() float64 { return float64(a) } - -// Snapshot returns the snapshot. -func (a EWMASnapshot) Snapshot() EWMA { return a } - -// Tick panics. -func (EWMASnapshot) Tick() { - panic("Tick called on an EWMASnapshot") -} - -// Update panics. -func (EWMASnapshot) Update(int64) { - panic("Update called on an EWMASnapshot") -} - -// NilEWMA is a no-op EWMA. -type NilEWMA struct{} - -// Rate is a no-op. -func (NilEWMA) Rate() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } - -// Tick is a no-op. -func (NilEWMA) Tick() {} - -// Update is a no-op. -func (NilEWMA) Update(n int64) {} - -// StandardEWMA is the standard implementation of an EWMA and tracks the number -// of uncounted events and processes them on each tick. It uses the -// sync/atomic package to manage uncounted events. -type StandardEWMA struct { - uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment - alpha float64 - rate float64 - init bool - mutex sync.Mutex -} - -// Rate returns the moving average rate of events per second. -func (a *StandardEWMA) Rate() float64 { - a.mutex.Lock() - defer a.mutex.Unlock() - return a.rate * float64(1e9) -} - -// Snapshot returns a read-only copy of the EWMA. -func (a *StandardEWMA) Snapshot() EWMA { - return EWMASnapshot(a.Rate()) -} - -// Tick ticks the clock to update the moving average. It assumes it is called -// every five seconds. -func (a *StandardEWMA) Tick() { - count := atomic.LoadInt64(&a.uncounted) - atomic.AddInt64(&a.uncounted, -count) - instantRate := float64(count) / float64(5e9) - a.mutex.Lock() - defer a.mutex.Unlock() - if a.init { - a.rate += a.alpha * (instantRate - a.rate) - } else { - a.init = true - a.rate = instantRate - } -} - -// Update adds n uncounted events. 
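A hedged sketch of the EWMA life cycle described above; in practice a Meter drives `Tick` every five seconds, but here it is called manually to show the flow:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	a := metrics.NewEWMA1() // one-minute moving average
	a.Update(100)           // record 100 uncounted events
	a.Tick()                // fold them into the rate (assumes a 5s tick)
	fmt.Println(a.Rate(), "events/sec")
}
```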
-func (a *StandardEWMA) Update(n int64) { - atomic.AddInt64(&a.uncounted, n) -} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go deleted file mode 100644 index cb57a93889..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/gauge.go +++ /dev/null @@ -1,120 +0,0 @@ -package metrics - -import "sync/atomic" - -// Gauges hold an int64 value that can be set arbitrarily. -type Gauge interface { - Snapshot() Gauge - Update(int64) - Value() int64 -} - -// GetOrRegisterGauge returns an existing Gauge or constructs and registers a -// new StandardGauge. -func GetOrRegisterGauge(name string, r Registry) Gauge { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGauge).(Gauge) -} - -// NewGauge constructs a new StandardGauge. -func NewGauge() Gauge { - if UseNilMetrics { - return NilGauge{} - } - return &StandardGauge{0} -} - -// NewRegisteredGauge constructs and registers a new StandardGauge. -func NewRegisteredGauge(name string, r Registry) Gauge { - c := NewGauge() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGauge(f func() int64) Gauge { - if UseNilMetrics { - return NilGauge{} - } - return &FunctionalGauge{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. -func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { - c := NewFunctionalGauge(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeSnapshot is a read-only copy of another Gauge. -type GaugeSnapshot int64 - -// Snapshot returns the snapshot. -func (g GaugeSnapshot) Snapshot() Gauge { return g } - -// Update panics. -func (GaugeSnapshot) Update(int64) { - panic("Update called on a GaugeSnapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeSnapshot) Value() int64 { return int64(g) } - -// NilGauge is a no-op Gauge. -type NilGauge struct{} - -// Snapshot is a no-op. -func (NilGauge) Snapshot() Gauge { return NilGauge{} } - -// Update is a no-op. -func (NilGauge) Update(v int64) {} - -// Value is a no-op. -func (NilGauge) Value() int64 { return 0 } - -// StandardGauge is the standard implementation of a Gauge and uses the -// sync/atomic package to manage a single int64 value. -type StandardGauge struct { - value int64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGauge) Snapshot() Gauge { - return GaugeSnapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGauge) Update(v int64) { - atomic.StoreInt64(&g.value, v) -} - -// Value returns the gauge's current value. -func (g *StandardGauge) Value() int64 { - return atomic.LoadInt64(&g.value) -} - -// FunctionalGauge returns value from given function -type FunctionalGauge struct { - value func() int64 -} - -// Value returns the gauge's current value. -func (g FunctionalGauge) Value() int64 { - return g.value() -} - -// Snapshot returns the snapshot. -func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } - -// Update panics. 
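For reference, a minimal sketch of the functional gauge shown above, which computes its value lazily on each read; the goroutine count is an arbitrary example source:

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/rcrowley/go-metrics"
)

func main() {
	g := metrics.NewFunctionalGauge(func() int64 {
		return int64(runtime.NumGoroutine())
	})
	metrics.Register("goroutines", g)
	fmt.Println(g.Value()) // evaluated on demand
}
```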
-func (FunctionalGauge) Update(int64) { - panic("Update called on a FunctionalGauge") -} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go deleted file mode 100644 index 6f93920b2c..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go +++ /dev/null @@ -1,127 +0,0 @@ -package metrics - -import "sync" - -// GaugeFloat64s hold a float64 value that can be set arbitrarily. -type GaugeFloat64 interface { - Snapshot() GaugeFloat64 - Update(float64) - Value() float64 -} - -// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a -// new StandardGaugeFloat64. -func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) -} - -// NewGaugeFloat64 constructs a new StandardGaugeFloat64. -func NewGaugeFloat64() GaugeFloat64 { - if UseNilMetrics { - return NilGaugeFloat64{} - } - return &StandardGaugeFloat64{ - value: 0.0, - } -} - -// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. -func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { - c := NewGaugeFloat64() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { - if UseNilMetrics { - return NilGaugeFloat64{} - } - return &FunctionalGaugeFloat64{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. -func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { - c := NewFunctionalGaugeFloat64(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. -type GaugeFloat64Snapshot float64 - -// Snapshot returns the snapshot. -func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } - -// Update panics. -func (GaugeFloat64Snapshot) Update(float64) { - panic("Update called on a GaugeFloat64Snapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } - -// NilGauge is a no-op Gauge. -type NilGaugeFloat64 struct{} - -// Snapshot is a no-op. -func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } - -// Update is a no-op. -func (NilGaugeFloat64) Update(v float64) {} - -// Value is a no-op. -func (NilGaugeFloat64) Value() float64 { return 0.0 } - -// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses -// sync.Mutex to manage a single float64 value. -type StandardGaugeFloat64 struct { - mutex sync.Mutex - value float64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { - return GaugeFloat64Snapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGaugeFloat64) Update(v float64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v -} - -// Value returns the gauge's current value. -func (g *StandardGaugeFloat64) Value() float64 { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value -} - -// FunctionalGaugeFloat64 returns value from given function -type FunctionalGaugeFloat64 struct { - value func() float64 -} - -// Value returns the gauge's current value. 
-func (g FunctionalGaugeFloat64) Value() float64 { - return g.value() -} - -// Snapshot returns the snapshot. -func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } - -// Update panics. -func (FunctionalGaugeFloat64) Update(float64) { - panic("Update called on a FunctionalGaugeFloat64") -} diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go deleted file mode 100644 index abd0a7d291..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/graphite.go +++ /dev/null @@ -1,113 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" -) - -// GraphiteConfig provides a container with configuration parameters for -// the Graphite exporter -type GraphiteConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from timers and histograms -} - -// Graphite is a blocking exporter function which reports metrics in r -// to a graphite server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// GraphiteWithConfig is a blocking exporter function just like Graphite, -// but it takes a GraphiteConfig instead. -func GraphiteWithConfig(c GraphiteConfig) { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - for _ = range time.Tick(c.FlushInterval) { - if err := graphite(&c); nil != err { - log.Println(err) - } - } -} - -// GraphiteOnce performs a single submission to Graphite, returning a -// non-nil error on failed connections. This can be used in a loop -// similar to GraphiteWithConfig for custom error handling. -func GraphiteOnce(c GraphiteConfig) error { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - return graphite(&c) -} - -func graphite(c *GraphiteConfig) error { - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) - case Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) - case GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go deleted file mode 100644 index 445131caee..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/healthcheck.go +++ /dev/null @@ -1,61 +0,0 @@ -package metrics - -// Healthchecks hold an error value describing an arbitrary up/down status. -type Healthcheck interface { - Check() - Error() error - Healthy() - Unhealthy(error) -} - -// NewHealthcheck constructs a new Healthcheck which will use the given -// function to update its status. 
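As a usage sketch before the implementation: a check built with this constructor only updates when driven, typically via the registry. The pingDB probe and the "db.alive" name below are hypothetical:

```go
package main

import (
	"errors"

	metrics "github.com/rcrowley/go-metrics"
)

// pingDB stands in for a real connectivity probe.
func pingDB() error { return errors.New("connection refused") }

func main() {
	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
		if err := pingDB(); err != nil {
			h.Unhealthy(err) // stored until the next Check
		} else {
			h.Healthy()
		}
	})
	metrics.DefaultRegistry.Register("db.alive", hc)

	// RunHealthchecks calls Check() on every registered Healthcheck.
	metrics.DefaultRegistry.RunHealthchecks()
	if err := hc.Error(); err != nil {
		// react to the unhealthy state
	}
}
```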
-func NewHealthcheck(f func(Healthcheck)) Healthcheck { - if UseNilMetrics { - return NilHealthcheck{} - } - return &StandardHealthcheck{nil, f} -} - -// NilHealthcheck is a no-op. -type NilHealthcheck struct{} - -// Check is a no-op. -func (NilHealthcheck) Check() {} - -// Error is a no-op. -func (NilHealthcheck) Error() error { return nil } - -// Healthy is a no-op. -func (NilHealthcheck) Healthy() {} - -// Unhealthy is a no-op. -func (NilHealthcheck) Unhealthy(error) {} - -// StandardHealthcheck is the standard implementation of a Healthcheck and -// stores the status and a function to call to update the status. -type StandardHealthcheck struct { - err error - f func(Healthcheck) -} - -// Check runs the healthcheck function to update the healthcheck's status. -func (h *StandardHealthcheck) Check() { - h.f(h) -} - -// Error returns the healthcheck's status, which will be nil if it is healthy. -func (h *StandardHealthcheck) Error() error { - return h.err -} - -// Healthy marks the healthcheck as healthy. -func (h *StandardHealthcheck) Healthy() { - h.err = nil -} - -// Unhealthy marks the healthcheck as unhealthy. The error is stored and -// may be retrieved by the Error method. -func (h *StandardHealthcheck) Unhealthy(err error) { - h.err = err -} diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go deleted file mode 100644 index dbc837fe4d..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/histogram.go +++ /dev/null @@ -1,202 +0,0 @@ -package metrics - -// Histograms calculate distribution statistics from a series of int64 values. -type Histogram interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Sample() Sample - Snapshot() Histogram - StdDev() float64 - Sum() int64 - Update(int64) - Variance() float64 -} - -// GetOrRegisterHistogram returns an existing Histogram or constructs and -// registers a new StandardHistogram. -func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) -} - -// NewHistogram constructs a new StandardHistogram from a Sample. -func NewHistogram(s Sample) Histogram { - if UseNilMetrics { - return NilHistogram{} - } - return &StandardHistogram{sample: s} -} - -// NewRegisteredHistogram constructs and registers a new StandardHistogram from -// a Sample. -func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { - c := NewHistogram(s) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// HistogramSnapshot is a read-only copy of another Histogram. -type HistogramSnapshot struct { - sample *SampleSnapshot -} - -// Clear panics. -func (*HistogramSnapshot) Clear() { - panic("Clear called on a HistogramSnapshot") -} - -// Count returns the number of samples recorded at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample at the time the snapshot -// was taken. -func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample at the time the snapshot was -// taken. 
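Stepping back to the histogram constructors above, a usage sketch; the sample parameters follow this library's own convention (see sample.go later in this patch), and the metric name is illustrative:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// The histogram's memory is bounded by its Sample.
	h := metrics.GetOrRegisterHistogram("req.size", nil,
		metrics.NewExpDecaySample(1028, 0.015))
	for i := int64(1); i <= 100; i++ {
		h.Update(i)
	}
	// Read through a snapshot so the statistics are mutually consistent.
	s := h.Snapshot()
	fmt.Println(s.Count(), s.Min(), s.Max(), s.Mean())
	fmt.Println(s.Percentiles([]float64{0.5, 0.95, 0.99}))
}
```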
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the sample -// at the time the snapshot was taken. -func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *HistogramSnapshot) Sample() Sample { return h.sample } - -// Snapshot returns the snapshot. -func (h *HistogramSnapshot) Snapshot() Histogram { return h } - -// StdDev returns the standard deviation of the values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample at the time the snapshot was taken. -func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } - -// Update panics. -func (*HistogramSnapshot) Update(int64) { - panic("Update called on a HistogramSnapshot") -} - -// Variance returns the variance of inputs at the time the snapshot was taken. -func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } - -// NilHistogram is a no-op Histogram. -type NilHistogram struct{} - -// Clear is a no-op. -func (NilHistogram) Clear() {} - -// Count is a no-op. -func (NilHistogram) Count() int64 { return 0 } - -// Max is a no-op. -func (NilHistogram) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilHistogram) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilHistogram) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilHistogram) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilHistogram) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Sample is a no-op. -func (NilHistogram) Sample() Sample { return NilSample{} } - -// Snapshot is a no-op. -func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } - -// StdDev is a no-op. -func (NilHistogram) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilHistogram) Sum() int64 { return 0 } - -// Update is a no-op. -func (NilHistogram) Update(v int64) {} - -// Variance is a no-op. -func (NilHistogram) Variance() float64 { return 0.0 } - -// StandardHistogram is the standard implementation of a Histogram and uses a -// Sample to bound its memory use. -type StandardHistogram struct { - sample Sample -} - -// Clear clears the histogram and its sample. -func (h *StandardHistogram) Clear() { h.sample.Clear() } - -// Count returns the number of samples recorded since the histogram was last -// cleared. -func (h *StandardHistogram) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample. -func (h *StandardHistogram) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample. -func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample. -func (h *StandardHistogram) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of the values in the sample. -func (h *StandardHistogram) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. 
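The Nil* stubs above exist for the package-wide UseNilMetrics kill-switch declared in metrics.go further down in this patch; a sketch of its effect:

```go
package main

import metrics "github.com/rcrowley/go-metrics"

func main() {
	// Flip the kill-switch before any constructors run: every metric built
	// afterwards is a no-op stub such as NilHistogram.
	metrics.UseNilMetrics = true

	h := metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
	h.Update(123) // discarded
	_ = h.Count() // always 0
}
```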
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *StandardHistogram) Sample() Sample { return h.sample } - -// Snapshot returns a read-only copy of the histogram. -func (h *StandardHistogram) Snapshot() Histogram { - return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} -} - -// StdDev returns the standard deviation of the values in the sample. -func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample. -func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } - -// Update samples a new value. -func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } - -// Variance returns the variance of the values in the sample. -func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go deleted file mode 100644 index 174b9477e9..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/json.go +++ /dev/null @@ -1,31 +0,0 @@ -package metrics - -import ( - "encoding/json" - "io" - "time" -) - -// MarshalJSON returns a byte slice containing a JSON representation of all -// the metrics in the Registry. -func (r *StandardRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(r.GetAll()) -} - -// WriteJSON writes metrics from the given registry periodically to the -// specified io.Writer as JSON. -func WriteJSON(r Registry, d time.Duration, w io.Writer) { - for _ = range time.Tick(d) { - WriteJSONOnce(r, w) - } -} - -// WriteJSONOnce writes metrics from the given registry to the specified -// io.Writer as JSON. -func WriteJSONOnce(r Registry, w io.Writer) { - json.NewEncoder(w).Encode(r) -} - -func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(p.GetAll()) -} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go deleted file mode 100644 index f8074c0457..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/log.go +++ /dev/null @@ -1,80 +0,0 @@ -package metrics - -import ( - "time" -) - -type Logger interface { - Printf(format string, v ...interface{}) -} - -func Log(r Registry, freq time.Duration, l Logger) { - LogScaled(r, freq, time.Nanosecond, l) -} - -// Output each metric in the given registry periodically using the given -// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
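The JSON writer above and LogScaled below are the quickest ways to inspect a registry; a sketch, with destinations and intervals chosen arbitrarily:

```go
package main

import (
	"log"
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.NewRegisteredGauge("answer", r).Update(42)

	// One-shot JSON dump of every metric in the registry.
	metrics.WriteJSONOnce(r, os.Stdout) // {"answer":{"value":42}}

	// Periodic textual dump; timer durations are printed in milliseconds.
	go metrics.LogScaled(r, 10*time.Second, time.Millisecond,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
	time.Sleep(25 * time.Second) // let a couple of dumps happen
}
```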
-func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { - du := float64(scale) - duSuffix := scale.String()[1:] - - for _ = range time.Tick(freq) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - l.Printf("counter %s\n", name) - l.Printf(" count: %9d\n", metric.Count()) - case Gauge: - l.Printf("gauge %s\n", name) - l.Printf(" value: %9d\n", metric.Value()) - case GaugeFloat64: - l.Printf("gauge %s\n", name) - l.Printf(" value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - l.Printf("healthcheck %s\n", name) - l.Printf(" error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("histogram %s\n", name) - l.Printf(" count: %9d\n", h.Count()) - l.Printf(" min: %9d\n", h.Min()) - l.Printf(" max: %9d\n", h.Max()) - l.Printf(" mean: %12.2f\n", h.Mean()) - l.Printf(" stddev: %12.2f\n", h.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - l.Printf("meter %s\n", name) - l.Printf(" count: %9d\n", m.Count()) - l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) - l.Printf(" mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("timer %s\n", name) - l.Printf(" count: %9d\n", t.Count()) - l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) - l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) - l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) - l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) - l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) - l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) - l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) - l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) - l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) - l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) - l.Printf(" mean rate: %12.2f\n", t.RateMean()) - } - }) - } -} diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md deleted file mode 100644 index 47454f54b6..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/memory.md +++ /dev/null @@ -1,285 +0,0 @@ -Memory usage -============ - -(Highly unscientific.) 
- -Command used to gather static memory usage: - -```sh -grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" -``` - -Program used to gather baseline memory usage: - -```go -package main - -import "time" - -func main() { - time.Sleep(600e9) -} -``` - -Baseline --------- - -``` -VmPeak: 42604 kB -VmSize: 42604 kB -VmLck: 0 kB -VmHWM: 1120 kB -VmRSS: 1120 kB -VmData: 35460 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 36 kB -VmSwap: 0 kB -``` - -Program used to gather metric memory usage (with other metrics being similar): - -```go -package main - -import ( - "fmt" - "metrics" - "time" -) - -func main() { - fmt.Sprintf("foo") - metrics.NewRegistry() - time.Sleep(600e9) -} -``` - -1000 counters registered ------------------------- - -``` -VmPeak: 44016 kB -VmSize: 44016 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.412 kB virtual, TODO 0.808 kB resident per counter.** - -100000 counters registered --------------------------- - -``` -VmPeak: 55024 kB -VmSize: 55024 kB -VmLck: 0 kB -VmHWM: 12440 kB -VmRSS: 12440 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**0.1242 kB virtual, 0.1132 kB resident per counter.** - -1000 gauges registered ----------------------- - -``` -VmPeak: 44012 kB -VmSize: 44012 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.408 kB virtual, 0.808 kB resident per counter.** - -100000 gauges registered ------------------------- - -``` -VmPeak: 55020 kB -VmSize: 55020 kB -VmLck: 0 kB -VmHWM: 12432 kB -VmRSS: 12432 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 60 kB -VmSwap: 0 kB -``` - -**0.12416 kB virtual, 0.11312 resident per gauge.** - -1000 histograms with a uniform sample size of 1028 --------------------------------------------------- - -``` -VmPeak: 72272 kB -VmSize: 72272 kB -VmLck: 0 kB -VmHWM: 16204 kB -VmRSS: 16204 kB -VmData: 65100 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 80 kB -VmSwap: 0 kB -``` - -**29.668 kB virtual, TODO 15.084 resident per histogram.** - -10000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 256912 kB -VmSize: 256912 kB -VmLck: 0 kB -VmHWM: 146204 kB -VmRSS: 146204 kB -VmData: 249740 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 448 kB -VmSwap: 0 kB -``` - -**21.4308 kB virtual, 14.5084 kB resident per histogram.** - -50000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 908112 kB -VmSize: 908112 kB -VmLck: 0 kB -VmHWM: 645832 kB -VmRSS: 645588 kB -VmData: 900940 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1716 kB -VmSwap: 1544 kB -``` - -**17.31016 kB virtual, 12.88936 kB resident per histogram.** - -1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 -------------------------------------------------------------------------------------- - -``` -VmPeak: 62480 kB -VmSize: 62480 kB -VmLck: 0 kB -VmHWM: 11572 kB -VmRSS: 11572 kB -VmData: 55308 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**19.876 kB virtual, 10.452 kB resident per histogram.** - -10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
--------------------------------------------------------------------------------------- - -``` -VmPeak: 153296 kB -VmSize: 153296 kB -VmLck: 0 kB -VmHWM: 101176 kB -VmRSS: 101176 kB -VmData: 146124 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 240 kB -VmSwap: 0 kB -``` - -**11.0692 kB virtual, 10.0056 kB resident per histogram.** - -50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 557264 kB -VmSize: 557264 kB -VmLck: 0 kB -VmHWM: 501056 kB -VmRSS: 501056 kB -VmData: 550092 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1032 kB -VmSwap: 0 kB -``` - -**10.2932 kB virtual, 9.99872 kB resident per histogram.** - -1000 meters ------------ - -``` -VmPeak: 74504 kB -VmSize: 74504 kB -VmLck: 0 kB -VmHWM: 24124 kB -VmRSS: 24124 kB -VmData: 67340 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 92 kB -VmSwap: 0 kB -``` - -**31.9 kB virtual, 23.004 kB resident per meter.** - -10000 meters ------------- - -``` -VmPeak: 278920 kB -VmSize: 278920 kB -VmLck: 0 kB -VmHWM: 227300 kB -VmRSS: 227300 kB -VmData: 271756 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 488 kB -VmSwap: 0 kB -``` - -**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go deleted file mode 100644 index 53ff329b8c..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/meter.go +++ /dev/null @@ -1,264 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Meters count events to produce exponentially-weighted moving average rates -// at one-, five-, and fifteen-minutes and a mean rate. -type Meter interface { - Count() int64 - Mark(int64) - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Meter - Stop() -} - -// GetOrRegisterMeter returns an existing Meter or constructs and registers a -// new StandardMeter. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterMeter(name string, r Registry) Meter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewMeter).(Meter) -} - -// NewMeter constructs a new StandardMeter and launches a goroutine. -// Be sure to call Stop() once the meter is of no use to allow for garbage collection. -func NewMeter() Meter { - if UseNilMetrics { - return NilMeter{} - } - m := newStandardMeter() - arbiter.Lock() - defer arbiter.Unlock() - arbiter.meters[m] = struct{}{} - if !arbiter.started { - arbiter.started = true - go arbiter.tick() - } - return m -} - -// NewMeter constructs and registers a new StandardMeter and launches a -// goroutine. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredMeter(name string, r Registry) Meter { - c := NewMeter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// MeterSnapshot is a read-only copy of another Meter. -type MeterSnapshot struct { - count int64 - rate1, rate5, rate15, rateMean float64 -} - -// Count returns the count of events at the time the snapshot was taken. -func (m *MeterSnapshot) Count() int64 { return m.count } - -// Mark panics. 
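For reference, a minimal sketch of the meter lifecycle defined in this file; note the Stop/Unregister pairing that the comments above ask for (the metric name is illustrative):

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// First use starts the shared arbiter goroutine that ticks every 5s.
	m := metrics.NewRegisteredMeter("requests", nil)
	m.Mark(1) // one event
	m.Mark(5) // five more

	fmt.Println(m.Count())    // 6
	fmt.Println(m.Rate1())    // one-minute EWMA, events/sec
	fmt.Println(m.RateMean()) // lifetime mean rate

	// Meters hold a slot in the arbiter until stopped, so stop and
	// unregister them once they are of no further use.
	m.Stop()
	metrics.DefaultRegistry.Unregister("requests")
}
```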
-func (*MeterSnapshot) Mark(n int64) {
-	panic("Mark called on a MeterSnapshot")
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Meter { return m }
-
-// Stop is a no-op.
-func (m *MeterSnapshot) Stop() {}
-
-// NilMeter is a no-op Meter.
-type NilMeter struct{}
-
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Meter { return NilMeter{} }
-
-// Stop is a no-op.
-func (NilMeter) Stop() {}
-
-// StandardMeter is the standard implementation of a Meter.
-type StandardMeter struct {
-	lock        sync.RWMutex
-	snapshot    *MeterSnapshot
-	a1, a5, a15 EWMA
-	startTime   time.Time
-	stopped     bool
-}
-
-func newStandardMeter() *StandardMeter {
-	return &StandardMeter{
-		snapshot:  &MeterSnapshot{},
-		a1:        NewEWMA1(),
-		a5:        NewEWMA5(),
-		a15:       NewEWMA15(),
-		startTime: time.Now(),
-	}
-}
-
-// Stop stops the meter; Mark() will be a no-op if you use it after being stopped.
-func (m *StandardMeter) Stop() {
-	m.lock.Lock()
-	stopped := m.stopped
-	m.stopped = true
-	m.lock.Unlock()
-	if !stopped {
-		arbiter.Lock()
-		delete(arbiter.meters, m)
-		arbiter.Unlock()
-	}
-}
-
-// Count returns the number of events recorded.
-func (m *StandardMeter) Count() int64 {
-	m.lock.RLock()
-	count := m.snapshot.count
-	m.lock.RUnlock()
-	return count
-}
-
-// Mark records the occurrence of n events.
-func (m *StandardMeter) Mark(n int64) {
-	m.lock.Lock()
-	defer m.lock.Unlock()
-	if m.stopped {
-		return
-	}
-	m.snapshot.count += n
-	m.a1.Update(n)
-	m.a5.Update(n)
-	m.a15.Update(n)
-	m.updateSnapshot()
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
-	m.lock.RLock()
-	rate1 := m.snapshot.rate1
-	m.lock.RUnlock()
-	return rate1
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
-	m.lock.RLock()
-	rate5 := m.snapshot.rate5
-	m.lock.RUnlock()
-	return rate5
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
-	m.lock.RLock()
-	rate15 := m.snapshot.rate15
-	m.lock.RUnlock()
-	return rate15
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
-	m.lock.RLock()
-	rateMean := m.snapshot.rateMean
-	m.lock.RUnlock()
-	return rateMean
-}
-
-// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Meter {
-	m.lock.RLock()
-	snapshot := *m.snapshot
-	m.lock.RUnlock()
-	return &snapshot
-}
-
-func (m *StandardMeter) updateSnapshot() {
-	// should run with write lock held on m.lock
-	snapshot := m.snapshot
-	snapshot.rate1 = m.a1.Rate()
-	snapshot.rate5 = m.a5.Rate()
-	snapshot.rate15 = m.a15.Rate()
-	snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
-}
-
-func (m *StandardMeter) tick() {
-	m.lock.Lock()
-	defer m.lock.Unlock()
-	m.a1.Tick()
-	m.a5.Tick()
-	m.a15.Tick()
-	m.updateSnapshot()
-}
-
-// meterArbiter ticks meters every 5s from a single goroutine.
-// meters are referenced in a set for future stopping.
-type meterArbiter struct {
-	sync.RWMutex
-	started bool
-	meters  map[*StandardMeter]struct{}
-	ticker  *time.Ticker
-}
-
-var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
-
-// Ticks meters on the scheduled interval
-func (ma *meterArbiter) tick() {
-	for {
-		select {
-		case <-ma.ticker.C:
-			ma.tickMeters()
-		}
-	}
-}
-
-func (ma *meterArbiter) tickMeters() {
-	ma.RLock()
-	defer ma.RUnlock()
-	for meter := range ma.meters {
-		meter.tick()
-	}
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
deleted file mode 100644
index b97a49ed12..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/metrics.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Go port of Coda Hale's Metrics library
-//
-// <https://github.com/rcrowley/go-metrics>
-//
-// Coda Hale's original work: <https://github.com/codahale/metrics>
-package metrics
-
-// UseNilMetrics is checked by the constructor functions for all of the
-// standard metrics. If it is true, the metric returned is a stub.
-//
-// This global kill-switch helps quantify the observer effect and makes
-// for less cluttered pprof profiles.
-var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
deleted file mode 100644
index 266b6c93d2..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/opentsdb.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package metrics
-
-import (
-	"bufio"
-	"fmt"
-	"log"
-	"net"
-	"os"
-	"strings"
-	"time"
-)
-
-var shortHostName string = ""
-
-// OpenTSDBConfig provides a container with configuration parameters for
-// the OpenTSDB exporter
-type OpenTSDBConfig struct {
-	Addr          *net.TCPAddr  // Network address to connect to
-	Registry      Registry      // Registry to be exported
-	FlushInterval time.Duration // Flush interval
-	DurationUnit  time.Duration // Time conversion unit for durations
-	Prefix        string        // Prefix to be prepended to metric names
-}
-
-// OpenTSDB is a blocking exporter function which reports metrics in r
-// to a TSDB server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
-	OpenTSDBWithConfig(OpenTSDBConfig{
-		Addr:          addr,
-		Registry:      r,
-		FlushInterval: d,
-		DurationUnit:  time.Nanosecond,
-		Prefix:        prefix,
-	})
-}
-
-// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
-// but it takes an OpenTSDBConfig instead.
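A usage sketch for this exporter; the host, the conventional port 4242, and the prefix are illustrative:

```go
package main

import (
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "tsdb.example.com:4242")
	if err != nil {
		panic(err)
	}
	// OpenTSDB blocks, emitting "put <prefix>.<name> ..." lines every
	// interval, so it is typically the last call in a monitoring goroutine.
	metrics.OpenTSDB(metrics.DefaultRegistry, time.Minute, "myapp", addr)
}
```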
-func OpenTSDBWithConfig(c OpenTSDBConfig) { - for _ = range time.Tick(c.FlushInterval) { - if err := openTSDB(&c); nil != err { - log.Println(err) - } - } -} - -func getShortHostname() string { - if shortHostName == "" { - host, _ := os.Hostname() - if index := strings.Index(host, "."); index > 0 { - shortHostName = host[:index] - } else { - shortHostName = host - } - } - return shortHostName -} - -func openTSDB(c *OpenTSDBConfig) error { - shortHostname := getShortHostname() - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) - case Gauge: - fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case GaugeFloat64: - fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) - fmt.Fprintf(w, 
"put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go deleted file mode 100644 index 6c0007b173..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/registry.go +++ /dev/null @@ -1,354 +0,0 @@ -package metrics - -import ( - "fmt" - "reflect" - "strings" - "sync" -) - -// DuplicateMetric is the error returned by Registry.Register when a metric -// already exists. If you mean to Register that metric you must first -// Unregister the existing metric. -type DuplicateMetric string - -func (err DuplicateMetric) Error() string { - return fmt.Sprintf("duplicate metric: %s", string(err)) -} - -// A Registry holds references to a set of metrics by name and can iterate -// over them, calling callback functions provided by the user. -// -// This is an interface so as to encourage other structs to implement -// the Registry API as appropriate. -type Registry interface { - - // Call the given function for each registered metric. - Each(func(string, interface{})) - - // Get the metric by the given name or nil if none is registered. - Get(string) interface{} - - // GetAll metrics in the Registry. - GetAll() map[string]map[string]interface{} - - // Gets an existing metric or registers the given one. - // The interface can be the metric to register if not found in registry, - // or a function returning the metric for lazy instantiation. - GetOrRegister(string, interface{}) interface{} - - // Register the given metric under the given name. - Register(string, interface{}) error - - // Run all registered healthchecks. - RunHealthchecks() - - // Unregister the metric with the given name. - Unregister(string) - - // Unregister all metrics. (Mostly for testing.) - UnregisterAll() -} - -// The standard implementation of a Registry is a mutex-protected map -// of names to metrics. -type StandardRegistry struct { - metrics map[string]interface{} - mutex sync.Mutex -} - -// Create a new registry. -func NewRegistry() Registry { - return &StandardRegistry{metrics: make(map[string]interface{})} -} - -// Call the given function for each registered metric. -func (r *StandardRegistry) Each(f func(string, interface{})) { - for name, i := range r.registered() { - f(name, i) - } -} - -// Get the metric by the given name or nil if none is registered. -func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.metrics[name] -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. 
-// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - if metric, ok := r.metrics[name]; ok { - return metric - } - if v := reflect.ValueOf(i); v.Kind() == reflect.Func { - i = v.Call(nil)[0].Interface() - } - r.register(name, i) - return i -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func (r *StandardRegistry) Register(name string, i interface{}) error { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.register(name, i) -} - -// Run all registered healthchecks. -func (r *StandardRegistry) RunHealthchecks() { - r.mutex.Lock() - defer r.mutex.Unlock() - for _, i := range r.metrics { - if h, ok := i.(Healthcheck); ok { - h.Check() - } - } -} - -// GetAll metrics in the Registry -func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Count() - case Gauge: - values["value"] = metric.Value() - case GaugeFloat64: - values["value"] = metric.Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return data -} - -// Unregister the metric with the given name. -func (r *StandardRegistry) Unregister(name string) { - r.mutex.Lock() - defer r.mutex.Unlock() - r.stop(name) - delete(r.metrics, name) -} - -// Unregister all metrics. (Mostly for testing.) 
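A sketch of the two argument forms GetOrRegister accepts, using constructors shown elsewhere in this patch:

```go
package main

import metrics "github.com/rcrowley/go-metrics"

func main() {
	r := metrics.NewRegistry()

	// Passing a constructor (a func value) avoids building a throwaway
	// metric when the name is already registered.
	g := r.GetOrRegister("queue.depth", metrics.NewGauge).(metrics.Gauge)
	g.Update(3)

	// Passing a ready-made value also works; on later calls the existing
	// metric is returned and the argument is ignored.
	h := r.GetOrRegister("req.size",
		metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))).(metrics.Histogram)
	h.Update(512)
}
```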
-func (r *StandardRegistry) UnregisterAll() { - r.mutex.Lock() - defer r.mutex.Unlock() - for name, _ := range r.metrics { - r.stop(name) - delete(r.metrics, name) - } -} - -func (r *StandardRegistry) register(name string, i interface{}) error { - if _, ok := r.metrics[name]; ok { - return DuplicateMetric(name) - } - switch i.(type) { - case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: - r.metrics[name] = i - } - return nil -} - -func (r *StandardRegistry) registered() map[string]interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - metrics := make(map[string]interface{}, len(r.metrics)) - for name, i := range r.metrics { - metrics[name] = i - } - return metrics -} - -func (r *StandardRegistry) stop(name string) { - if i, ok := r.metrics[name]; ok { - if s, ok := i.(Stoppable); ok { - s.Stop() - } - } -} - -// Stoppable defines the metrics which has to be stopped. -type Stoppable interface { - Stop() -} - -type PrefixedRegistry struct { - underlying Registry - prefix string -} - -func NewPrefixedRegistry(prefix string) Registry { - return &PrefixedRegistry{ - underlying: NewRegistry(), - prefix: prefix, - } -} - -func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { - return &PrefixedRegistry{ - underlying: parent, - prefix: prefix, - } -} - -// Call the given function for each registered metric. -func (r *PrefixedRegistry) Each(fn func(string, interface{})) { - wrappedFn := func(prefix string) func(string, interface{}) { - return func(name string, iface interface{}) { - if strings.HasPrefix(name, prefix) { - fn(name, iface) - } else { - return - } - } - } - - baseRegistry, prefix := findPrefix(r, "") - baseRegistry.Each(wrappedFn(prefix)) -} - -func findPrefix(registry Registry, prefix string) (Registry, string) { - switch r := registry.(type) { - case *PrefixedRegistry: - return findPrefix(r.underlying, r.prefix+prefix) - case *StandardRegistry: - return r, prefix - } - return nil, "" -} - -// Get the metric by the given name or nil if none is registered. -func (r *PrefixedRegistry) Get(name string) interface{} { - realName := r.prefix + name - return r.underlying.Get(realName) -} - -// Gets an existing metric or registers the given one. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { - realName := r.prefix + name - return r.underlying.GetOrRegister(realName, metric) -} - -// Register the given metric under the given name. The name will be prefixed. -func (r *PrefixedRegistry) Register(name string, metric interface{}) error { - realName := r.prefix + name - return r.underlying.Register(realName, metric) -} - -// Run all registered healthchecks. -func (r *PrefixedRegistry) RunHealthchecks() { - r.underlying.RunHealthchecks() -} - -// GetAll metrics in the Registry -func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { - return r.underlying.GetAll() -} - -// Unregister the metric with the given name. The name will be prefixed. -func (r *PrefixedRegistry) Unregister(name string) { - realName := r.prefix + name - r.underlying.Unregister(realName) -} - -// Unregister all metrics. (Mostly for testing.) -func (r *PrefixedRegistry) UnregisterAll() { - r.underlying.UnregisterAll() -} - -var DefaultRegistry Registry = NewRegistry() - -// Call the given function for each registered metric. 
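A sketch of how the prefixing above behaves, with names chosen arbitrarily:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	parent := metrics.NewRegistry()
	// Every name that goes through the child is stored as "web.<name>".
	web := metrics.NewPrefixedChildRegistry(parent, "web.")

	metrics.NewRegisteredGauge("hits", web).Update(7)

	fmt.Println(web.Get("hits") != nil)        // true: prefix added on lookup
	fmt.Println(parent.Get("web.hits") != nil) // true: stored under full name

	// Each on the child walks the parent, filtered by the prefix.
	web.Each(func(name string, i interface{}) {
		fmt.Println(name) // web.hits
	})
}
```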
-func Each(f func(string, interface{})) { - DefaultRegistry.Each(f) -} - -// Get the metric by the given name or nil if none is registered. -func Get(name string) interface{} { - return DefaultRegistry.Get(name) -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -func GetOrRegister(name string, i interface{}) interface{} { - return DefaultRegistry.GetOrRegister(name, i) -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func Register(name string, i interface{}) error { - return DefaultRegistry.Register(name, i) -} - -// Register the given metric under the given name. Panics if a metric by the -// given name is already registered. -func MustRegister(name string, i interface{}) { - if err := Register(name, i); err != nil { - panic(err) - } -} - -// Run all registered healthchecks. -func RunHealthchecks() { - DefaultRegistry.RunHealthchecks() -} - -// Unregister the metric with the given name. -func Unregister(name string) { - DefaultRegistry.Unregister(name) -} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go deleted file mode 100644 index 11c6b785a0..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/runtime.go +++ /dev/null @@ -1,212 +0,0 @@ -package metrics - -import ( - "runtime" - "runtime/pprof" - "time" -) - -var ( - memStats runtime.MemStats - runtimeMetrics struct { - MemStats struct { - Alloc Gauge - BuckHashSys Gauge - DebugGC Gauge - EnableGC Gauge - Frees Gauge - HeapAlloc Gauge - HeapIdle Gauge - HeapInuse Gauge - HeapObjects Gauge - HeapReleased Gauge - HeapSys Gauge - LastGC Gauge - Lookups Gauge - Mallocs Gauge - MCacheInuse Gauge - MCacheSys Gauge - MSpanInuse Gauge - MSpanSys Gauge - NextGC Gauge - NumGC Gauge - GCCPUFraction GaugeFloat64 - PauseNs Histogram - PauseTotalNs Gauge - StackInuse Gauge - StackSys Gauge - Sys Gauge - TotalAlloc Gauge - } - NumCgoCall Gauge - NumGoroutine Gauge - NumThread Gauge - ReadMemStats Timer - } - frees uint64 - lookups uint64 - mallocs uint64 - numGC uint32 - numCgoCalls int64 - - threadCreateProfile = pprof.Lookup("threadcreate") -) - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called as a goroutine. -func CaptureRuntimeMemStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureRuntimeMemStatsOnce(r) - } -} - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called in a background -// goroutine. Giving a registry which has not been given to -// RegisterRuntimeMemStats will panic. -// -// Be very careful with this because runtime.ReadMemStats calls the C -// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() -// and that last one does what it says on the tin. -func CaptureRuntimeMemStatsOnce(r Registry) { - t := time.Now() - runtime.ReadMemStats(&memStats) // This takes 50-200us. 
- runtimeMetrics.ReadMemStats.UpdateSince(t) - - runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) - runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) - if memStats.DebugGC { - runtimeMetrics.MemStats.DebugGC.Update(1) - } else { - runtimeMetrics.MemStats.DebugGC.Update(0) - } - if memStats.EnableGC { - runtimeMetrics.MemStats.EnableGC.Update(1) - } else { - runtimeMetrics.MemStats.EnableGC.Update(0) - } - - runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) - runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) - runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) - runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) - runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) - runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) - runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) - runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) - runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) - runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) - runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) - runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) - runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) - runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) - runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) - runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) - runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) - - // - i := numGC % uint32(len(memStats.PauseNs)) - ii := memStats.NumGC % uint32(len(memStats.PauseNs)) - if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { - for i = 0; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } else { - if i > ii { - for ; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - i = 0 - } - for ; i < ii; i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } - frees = memStats.Frees - lookups = memStats.Lookups - mallocs = memStats.Mallocs - numGC = memStats.NumGC - - runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) - runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) - runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) - runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) - runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) - - currentNumCgoCalls := numCgoCall() - runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) - numCgoCalls = currentNumCgoCalls - - runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) - - runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) -} - -// Register runtimeMetrics for the Go runtime statistics exported in runtime and -// specifically runtime.MemStats. The runtimeMetrics are named by their -// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
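A usage sketch tying the registration and capture functions together; the interval and the metric read at the end are illustrative:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	// Registration must come first: capturing into a registry that was not
	// passed to RegisterRuntimeMemStats panics, per the warning above.
	metrics.RegisterRuntimeMemStats(r)

	// Each capture calls runtime.ReadMemStats, which briefly stops the
	// world, so pick the interval accordingly.
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(6 * time.Second) // allow at least one capture
	fmt.Println(r.Get("runtime.MemStats.HeapAlloc").(metrics.Gauge).Value())
}
```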
-func RegisterRuntimeMemStats(r Registry) { - runtimeMetrics.MemStats.Alloc = NewGauge() - runtimeMetrics.MemStats.BuckHashSys = NewGauge() - runtimeMetrics.MemStats.DebugGC = NewGauge() - runtimeMetrics.MemStats.EnableGC = NewGauge() - runtimeMetrics.MemStats.Frees = NewGauge() - runtimeMetrics.MemStats.HeapAlloc = NewGauge() - runtimeMetrics.MemStats.HeapIdle = NewGauge() - runtimeMetrics.MemStats.HeapInuse = NewGauge() - runtimeMetrics.MemStats.HeapObjects = NewGauge() - runtimeMetrics.MemStats.HeapReleased = NewGauge() - runtimeMetrics.MemStats.HeapSys = NewGauge() - runtimeMetrics.MemStats.LastGC = NewGauge() - runtimeMetrics.MemStats.Lookups = NewGauge() - runtimeMetrics.MemStats.Mallocs = NewGauge() - runtimeMetrics.MemStats.MCacheInuse = NewGauge() - runtimeMetrics.MemStats.MCacheSys = NewGauge() - runtimeMetrics.MemStats.MSpanInuse = NewGauge() - runtimeMetrics.MemStats.MSpanSys = NewGauge() - runtimeMetrics.MemStats.NextGC = NewGauge() - runtimeMetrics.MemStats.NumGC = NewGauge() - runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() - runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) - runtimeMetrics.MemStats.PauseTotalNs = NewGauge() - runtimeMetrics.MemStats.StackInuse = NewGauge() - runtimeMetrics.MemStats.StackSys = NewGauge() - runtimeMetrics.MemStats.Sys = NewGauge() - runtimeMetrics.MemStats.TotalAlloc = NewGauge() - runtimeMetrics.NumCgoCall = NewGauge() - runtimeMetrics.NumGoroutine = NewGauge() - runtimeMetrics.NumThread = NewGauge() - runtimeMetrics.ReadMemStats = NewTimer() - - r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) - r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) - r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) - r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) - r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) - r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) - r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) - r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) - r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) - r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) - r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) - r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) - r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) - r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) - r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) - r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) - r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) - r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) - r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) - r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) - r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) - r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) - r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) - r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) - r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) - r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) - 
r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) - r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) - r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) - r.Register("runtime.NumThread", runtimeMetrics.NumThread) - r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) -} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go deleted file mode 100644 index e3391f4e89..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build cgo -// +build !appengine - -package metrics - -import "runtime" - -func numCgoCall() int64 { - return runtime.NumCgoCall() -} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go deleted file mode 100644 index ca12c05bac..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.5 - -package metrics - -import "runtime" - -func gcCPUFraction(memStats *runtime.MemStats) float64 { - return memStats.GCCPUFraction -} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go deleted file mode 100644 index 616a3b4751..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !cgo appengine - -package metrics - -func numCgoCall() int64 { - return 0 -} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go deleted file mode 100644 index be96aa6f1b..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !go1.5 - -package metrics - -import "runtime" - -func gcCPUFraction(memStats *runtime.MemStats) float64 { - return 0 -} diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go deleted file mode 100644 index fecee5ef68..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/sample.go +++ /dev/null @@ -1,616 +0,0 @@ -package metrics - -import ( - "math" - "math/rand" - "sort" - "sync" - "time" -) - -const rescaleThreshold = time.Hour - -// Samples maintain a statistically-significant selection of values from -// a stream. -type Sample interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Size() int - Snapshot() Sample - StdDev() float64 - Sum() int64 - Update(int64) - Values() []int64 - Variance() float64 -} - -// ExpDecaySample is an exponentially-decaying sample using a forward-decaying -// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time -// Decay Model for Streaming Systems". -// -// -type ExpDecaySample struct { - alpha float64 - count int64 - mutex sync.Mutex - reservoirSize int - t0, t1 time.Time - values *expDecaySampleHeap -} - -// NewExpDecaySample constructs a new exponentially-decaying sample with the -// given reservoir size and alpha. -func NewExpDecaySample(reservoirSize int, alpha float64) Sample { - if UseNilMetrics { - return NilSample{} - } - s := &ExpDecaySample{ - alpha: alpha, - reservoirSize: reservoirSize, - t0: time.Now(), - values: newExpDecaySampleHeap(reservoirSize), - } - s.t1 = s.t0.Add(rescaleThreshold) - return s -} - -// Clear clears all samples. 
-func (s *ExpDecaySample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.t0 = time.Now() - s.t1 = s.t0.Add(rescaleThreshold) - s.values.Clear() -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *ExpDecaySample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *ExpDecaySample) Max() int64 { - return SampleMax(s.Values()) -} - -// Mean returns the mean of the values in the sample. -func (s *ExpDecaySample) Mean() float64 { - return SampleMean(s.Values()) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *ExpDecaySample) Min() int64 { - return SampleMin(s.Values()) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *ExpDecaySample) Percentile(p float64) float64 { - return SamplePercentile(s.Values(), p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.Values(), ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *ExpDecaySample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.values.Size() -} - -// Snapshot returns a read-only copy of the sample. -func (s *ExpDecaySample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *ExpDecaySample) StdDev() float64 { - return SampleStdDev(s.Values()) -} - -// Sum returns the sum of the values in the sample. -func (s *ExpDecaySample) Sum() int64 { - return SampleSum(s.Values()) -} - -// Update samples a new value. -func (s *ExpDecaySample) Update(v int64) { - s.update(time.Now(), v) -} - -// Values returns a copy of the values in the sample. -func (s *ExpDecaySample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return values -} - -// Variance returns the variance of the values in the sample. -func (s *ExpDecaySample) Variance() float64 { - return SampleVariance(s.Values()) -} - -// update samples a new value at a particular timestamp. This is a method all -// its own to facilitate testing. -func (s *ExpDecaySample) update(t time.Time, v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if s.values.Size() == s.reservoirSize { - s.values.Pop() - } - s.values.Push(expDecaySample{ - k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), - v: v, - }) - if t.After(s.t1) { - values := s.values.Values() - t0 := s.t0 - s.values.Clear() - s.t0 = t - s.t1 = s.t0.Add(rescaleThreshold) - for _, v := range values { - v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) - s.values.Push(v) - } - } -} - -// NilSample is a no-op Sample. -type NilSample struct{} - -// Clear is a no-op. -func (NilSample) Clear() {} - -// Count is a no-op. -func (NilSample) Count() int64 { return 0 } - -// Max is a no-op. -func (NilSample) Max() int64 { return 0 } - -// Mean is a no-op. 
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
-	return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Snapshot is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
-	if 0 == len(values) {
-		return 0
-	}
-	var max int64 = math.MinInt64
-	for _, v := range values {
-		if max < v {
-			max = v
-		}
-	}
-	return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
-	if 0 == len(values) {
-		return 0.0
-	}
-	return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
-	if 0 == len(values) {
-		return 0
-	}
-	var min int64 = math.MaxInt64
-	for _, v := range values {
-		if min > v {
-			min = v
-		}
-	}
-	return min
-}
-
-// SamplePercentile returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
-	return SamplePercentiles(values, []float64{p})[0]
-}
-
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
-	scores := make([]float64, len(ps))
-	size := len(values)
-	if size > 0 {
-		sort.Sort(values)
-		for i, p := range ps {
-			pos := p * float64(size+1)
-			if pos < 1.0 {
-				scores[i] = float64(values[0])
-			} else if pos >= float64(size) {
-				scores[i] = float64(values[size-1])
-			} else {
-				lower := float64(values[int(pos)-1])
-				upper := float64(values[int(pos)])
-				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
-			}
-		}
-	}
-	return scores
-}
-
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
-	count  int64
-	values []int64
-}
-
-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
-	return &SampleSnapshot{
-		count:  count,
-		values: values,
-	}
-}
-
-// Clear panics.
-func (*SampleSnapshot) Clear() {
-	panic("Clear called on a SampleSnapshot")
-}
-
-// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
-
-// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
-
-// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
-
-// Percentile returns an arbitrary percentile of values at the time the
-// snapshot was taken.
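To make the interpolation in SamplePercentiles concrete, a worked example derived directly from the logic above:

    values = {10, 20, 30, 40}, p = 0.5
    pos    = 0.5 * (4 + 1)              = 2.5
    lower  = values[int(2.5) - 1]       = values[1] = 20
    upper  = values[int(2.5)]           = values[2] = 30
    score  = 20 + (2.5 - 2.0)*(30 - 20) = 25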
-func (s *SampleSnapshot) Percentile(p float64) float64 { - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values at the time -// the snapshot was taken. -func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample at the time the snapshot was taken. -func (s *SampleSnapshot) Size() int { return len(s.values) } - -// Snapshot returns the snapshot. -func (s *SampleSnapshot) Snapshot() Sample { return s } - -// StdDev returns the standard deviation of values at the time the snapshot was -// taken. -func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } - -// Sum returns the sum of values at the time the snapshot was taken. -func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } - -// Update panics. -func (*SampleSnapshot) Update(int64) { - panic("Update called on a SampleSnapshot") -} - -// Values returns a copy of the values in the sample. -func (s *SampleSnapshot) Values() []int64 { - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of values at the time the snapshot was taken. -func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } - -// SampleStdDev returns the standard deviation of the slice of int64. -func SampleStdDev(values []int64) float64 { - return math.Sqrt(SampleVariance(values)) -} - -// SampleSum returns the sum of the slice of int64. -func SampleSum(values []int64) int64 { - var sum int64 - for _, v := range values { - sum += v - } - return sum -} - -// SampleVariance returns the variance of the slice of int64. -func SampleVariance(values []int64) float64 { - if 0 == len(values) { - return 0.0 - } - m := SampleMean(values) - var sum float64 - for _, v := range values { - d := float64(v) - m - sum += d * d - } - return sum / float64(len(values)) -} - -// A uniform sample using Vitter's Algorithm R. -// -// -type UniformSample struct { - count int64 - mutex sync.Mutex - reservoirSize int - values []int64 -} - -// NewUniformSample constructs a new uniform sample with the given reservoir -// size. -func NewUniformSample(reservoirSize int) Sample { - if UseNilMetrics { - return NilSample{} - } - return &UniformSample{ - reservoirSize: reservoirSize, - values: make([]int64, 0, reservoirSize), - } -} - -// Clear clears all samples. -func (s *UniformSample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.values = make([]int64, 0, s.reservoirSize) -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *UniformSample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *UniformSample) Max() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMax(s.values) -} - -// Mean returns the mean of the values in the sample. -func (s *UniformSample) Mean() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMean(s.values) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *UniformSample) Min() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMin(s.values) -} - -// Percentile returns an arbitrary percentile of values in the sample. 
-func (s *UniformSample) Percentile(p float64) float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *UniformSample) Percentiles(ps []float64) []float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *UniformSample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - -// Snapshot returns a read-only copy of the sample. -func (s *UniformSample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *UniformSample) StdDev() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleStdDev(s.values) -} - -// Sum returns the sum of the values in the sample. -func (s *UniformSample) Sum() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleSum(s.values) -} - -// Update samples a new value. -func (s *UniformSample) Update(v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) < s.reservoirSize { - s.values = append(s.values, v) - } else { - r := rand.Int63n(s.count) - if r < int64(len(s.values)) { - s.values[int(r)] = v - } - } -} - -// Values returns a copy of the values in the sample. -func (s *UniformSample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of the values in the sample. -func (s *UniformSample) Variance() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleVariance(s.values) -} - -// expDecaySample represents an individual sample in a heap. -type expDecaySample struct { - k float64 - v int64 -} - -func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { - return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} -} - -// expDecaySampleHeap is a min-heap of expDecaySamples. 
-// The internal implementation is copied from the standard library's container/heap -type expDecaySampleHeap struct { - s []expDecaySample -} - -func (h *expDecaySampleHeap) Clear() { - h.s = h.s[:0] -} - -func (h *expDecaySampleHeap) Push(s expDecaySample) { - n := len(h.s) - h.s = h.s[0 : n+1] - h.s[n] = s - h.up(n) -} - -func (h *expDecaySampleHeap) Pop() expDecaySample { - n := len(h.s) - 1 - h.s[0], h.s[n] = h.s[n], h.s[0] - h.down(0, n) - - n = len(h.s) - s := h.s[n-1] - h.s = h.s[0 : n-1] - return s -} - -func (h *expDecaySampleHeap) Size() int { - return len(h.s) -} - -func (h *expDecaySampleHeap) Values() []expDecaySample { - return h.s -} - -func (h *expDecaySampleHeap) up(j int) { - for { - i := (j - 1) / 2 // parent - if i == j || !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - j = i - } -} - -func (h *expDecaySampleHeap) down(i, n int) { - for { - j1 := 2*i + 1 - if j1 >= n || j1 < 0 { // j1 < 0 after int overflow - break - } - j := j1 // left child - if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { - j = j2 // = 2*i + 2 // right child - } - if !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - i = j - } -} - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go deleted file mode 100644 index 693f190855..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/syslog.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build !windows - -package metrics - -import ( - "fmt" - "log/syslog" - "time" -) - -// Output each metric in the given registry to syslog periodically using -// the given syslogger. 
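A hedged sketch of driving the Syslog reporter below; the syslog tag and interval are placeholders, while syslog.New and DefaultRegistry are the standard library and go-metrics names, respectively:

    import (
        "log/syslog"
        "time"

        "github.com/rcrowley/go-metrics"
    )

    func startSyslogReporter() {
        w, err := syslog.New(syslog.LOG_INFO, "my-app") // tag is a placeholder
        if err != nil {
            return // no syslog available; skip reporting
        }
        go metrics.Syslog(metrics.DefaultRegistry, 60*time.Second, w)
    }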
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) { - for _ = range time.Tick(d) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) - case Gauge: - w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) - case GaugeFloat64: - w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) - case Healthcheck: - metric.Check() - w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", - name, - h.Count(), - h.Min(), - h.Max(), - h.Mean(), - h.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - )) - case Meter: - m := metric.Snapshot() - w.Info(fmt.Sprintf( - "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", - name, - m.Count(), - m.Rate1(), - m.Rate5(), - m.Rate15(), - m.RateMean(), - )) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", - name, - t.Count(), - t.Min(), - t.Max(), - t.Mean(), - t.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - t.Rate1(), - t.Rate5(), - t.Rate15(), - t.RateMean(), - )) - } - }) - } -} diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go deleted file mode 100644 index d6ec4c6260..0000000000 --- a/vendor/github.com/rcrowley/go-metrics/timer.go +++ /dev/null @@ -1,329 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Timers capture the duration and rate of events. -type Timer interface { - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Timer - StdDev() float64 - Stop() - Sum() int64 - Time(func()) - Update(time.Duration) - UpdateSince(time.Time) - Variance() float64 -} - -// GetOrRegisterTimer returns an existing Timer or constructs and registers a -// new StandardTimer. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterTimer(name string, r Registry) Timer { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewTimer).(Timer) -} - -// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. -// Be sure to call Stop() once the timer is of no use to allow for garbage collection. -func NewCustomTimer(h Histogram, m Meter) Timer { - if UseNilMetrics { - return NilTimer{} - } - return &StandardTimer{ - histogram: h, - meter: m, - } -} - -// NewRegisteredTimer constructs and registers a new StandardTimer. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredTimer(name string, r Registry) Timer { - c := NewTimer() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewTimer constructs a new StandardTimer using an exponentially-decaying -// sample with the same reservoir size and alpha as UNIX load averages. 
-// Be sure to call Stop() once the timer is of no use to allow for garbage collection. -func NewTimer() Timer { - if UseNilMetrics { - return NilTimer{} - } - return &StandardTimer{ - histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), - meter: NewMeter(), - } -} - -// NilTimer is a no-op Timer. -type NilTimer struct { - h Histogram - m Meter -} - -// Count is a no-op. -func (NilTimer) Count() int64 { return 0 } - -// Max is a no-op. -func (NilTimer) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilTimer) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilTimer) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilTimer) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilTimer) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Rate1 is a no-op. -func (NilTimer) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilTimer) Rate5() float64 { return 0.0 } - -// Rate15 is a no-op. -func (NilTimer) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilTimer) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilTimer) Snapshot() Timer { return NilTimer{} } - -// StdDev is a no-op. -func (NilTimer) StdDev() float64 { return 0.0 } - -// Stop is a no-op. -func (NilTimer) Stop() {} - -// Sum is a no-op. -func (NilTimer) Sum() int64 { return 0 } - -// Time is a no-op. -func (NilTimer) Time(func()) {} - -// Update is a no-op. -func (NilTimer) Update(time.Duration) {} - -// UpdateSince is a no-op. -func (NilTimer) UpdateSince(time.Time) {} - -// Variance is a no-op. -func (NilTimer) Variance() float64 { return 0.0 } - -// StandardTimer is the standard implementation of a Timer and uses a Histogram -// and Meter. -type StandardTimer struct { - histogram Histogram - meter Meter - mutex sync.Mutex -} - -// Count returns the number of events recorded. -func (t *StandardTimer) Count() int64 { - return t.histogram.Count() -} - -// Max returns the maximum value in the sample. -func (t *StandardTimer) Max() int64 { - return t.histogram.Max() -} - -// Mean returns the mean of the values in the sample. -func (t *StandardTimer) Mean() float64 { - return t.histogram.Mean() -} - -// Min returns the minimum value in the sample. -func (t *StandardTimer) Min() int64 { - return t.histogram.Min() -} - -// Percentile returns an arbitrary percentile of the values in the sample. -func (t *StandardTimer) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (t *StandardTimer) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (t *StandardTimer) Rate1() float64 { - return t.meter.Rate1() -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (t *StandardTimer) Rate5() float64 { - return t.meter.Rate5() -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (t *StandardTimer) Rate15() float64 { - return t.meter.Rate15() -} - -// RateMean returns the meter's mean rate of events per second. -func (t *StandardTimer) RateMean() float64 { - return t.meter.RateMean() -} - -// Snapshot returns a read-only copy of the timer. 
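Typical use of a timer together with the Snapshot method documented above, sketched only from the APIs shown in this file:

    import (
        "fmt"
        "time"

        "github.com/rcrowley/go-metrics"
    )

    func exampleTimer() {
        t := metrics.NewTimer()
        t.Time(func() { time.Sleep(10 * time.Millisecond) }) // times the closure
        t.Update(50 * time.Millisecond)                      // records an explicit duration

        snap := t.Snapshot() // histogram and meter are copied under one lock
        fmt.Printf("count=%d mean=%.2f 1m-rate=%.2f\n", snap.Count(), snap.Mean(), snap.Rate1())
    }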
-func (t *StandardTimer) Snapshot() Timer { - t.mutex.Lock() - defer t.mutex.Unlock() - return &TimerSnapshot{ - histogram: t.histogram.Snapshot().(*HistogramSnapshot), - meter: t.meter.Snapshot().(*MeterSnapshot), - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (t *StandardTimer) StdDev() float64 { - return t.histogram.StdDev() -} - -// Stop stops the meter. -func (t *StandardTimer) Stop() { - t.meter.Stop() -} - -// Sum returns the sum in the sample. -func (t *StandardTimer) Sum() int64 { - return t.histogram.Sum() -} - -// Record the duration of the execution of the given function. -func (t *StandardTimer) Time(f func()) { - ts := time.Now() - f() - t.Update(time.Since(ts)) -} - -// Record the duration of an event. -func (t *StandardTimer) Update(d time.Duration) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(d)) - t.meter.Mark(1) -} - -// Record the duration of an event that started at a time and ends now. -func (t *StandardTimer) UpdateSince(ts time.Time) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(time.Since(ts))) - t.meter.Mark(1) -} - -// Variance returns the variance of the values in the sample. -func (t *StandardTimer) Variance() float64 { - return t.histogram.Variance() -} - -// TimerSnapshot is a read-only copy of another Timer. -type TimerSnapshot struct { - histogram *HistogramSnapshot - meter *MeterSnapshot -} - -// Count returns the number of events recorded at the time the snapshot was -// taken. -func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } - -// Max returns the maximum value at the time the snapshot was taken. -func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } - -// Mean returns the mean value at the time the snapshot was taken. -func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } - -// Min returns the minimum value at the time the snapshot was taken. -func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } - -// Percentile returns an arbitrary percentile of sampled values at the time the -// snapshot was taken. -func (t *TimerSnapshot) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of sampled values at -// the time the snapshot was taken. -func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. -func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } - -// Snapshot returns the snapshot. -func (t *TimerSnapshot) Snapshot() Timer { return t } - -// StdDev returns the standard deviation of the values at the time the snapshot -// was taken. -func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } - -// Stop is a no-op. 
-func (t *TimerSnapshot) Stop() {}
-
-// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
-	panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
-	panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
-	panic("UpdateSince called on a TimerSnapshot")
-}
-
-// Variance returns the variance of the values at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
deleted file mode 100755
index c4ae91e642..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/validate.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# check there are no formatting issues
-GOFMT_LINES=`gofmt -l . | wc -l | xargs`
-test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
-
-# run the tests for the root package
-go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
deleted file mode 100644
index 091e971d2e..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/writer.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"io"
-	"sort"
-	"time"
-)
-
-// Write sorts and writes each metric in the given registry periodically to the
-// given io.Writer.
-func Write(r Registry, d time.Duration, w io.Writer) {
-	for _ = range time.Tick(d) {
-		WriteOnce(r, w)
-	}
-}
-
-// WriteOnce sorts and writes metrics in the given registry to the given
-// io.Writer.
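For instance, a one-shot, sorted dump of every registered metric (a sketch; os.Stdout stands in for any io.Writer):

    import (
        "os"

        "github.com/rcrowley/go-metrics"
    )

    func dumpMetrics() {
        metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)
    }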
-func WriteOnce(r Registry, w io.Writer) { - var namedMetrics namedMetricSlice - r.Each(func(name string, i interface{}) { - namedMetrics = append(namedMetrics, namedMetric{name, i}) - }) - - sort.Sort(namedMetrics) - for _, namedMetric := range namedMetrics { - switch metric := namedMetric.m.(type) { - case Counter: - fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", metric.Count()) - case Gauge: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %9d\n", metric.Value()) - case GaugeFloat64: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) - fmt.Fprintf(w, " error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "histogram %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", h.Count()) - fmt.Fprintf(w, " min: %9d\n", h.Min()) - fmt.Fprintf(w, " max: %9d\n", h.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "meter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", m.Count()) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "timer %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", t.Count()) - fmt.Fprintf(w, " min: %9d\n", t.Min()) - fmt.Fprintf(w, " max: %9d\n", t.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) - } - } -} - -type namedMetric struct { - name string - m interface{} -} - -// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. -type namedMetricSlice []namedMetric - -func (nms namedMetricSlice) Len() int { return len(nms) } - -func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } - -func (nms namedMetricSlice) Less(i, j int) bool { - return nms[i].name < nms[j].name -} diff --git a/vendor/github.com/willfaught/gockle/.travis.yml b/vendor/github.com/willfaught/gockle/.travis.yml deleted file mode 100644 index 58127a3b6e..0000000000 --- a/vendor/github.com/willfaught/gockle/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - tip -install: - - go get -v ./... - - go get -v github.com/golang/lint/golint - - go get -v github.com/mattn/goveralls -before_script: - - go vet ./... -script: - - go test -v -covermode count -coverprofile cover.out ./... 
- - goveralls -coverprofile cover.out -service travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/willfaught/gockle/batch.go b/vendor/github.com/willfaught/gockle/batch.go deleted file mode 100644 index 0d67326160..0000000000 --- a/vendor/github.com/willfaught/gockle/batch.go +++ /dev/null @@ -1,108 +0,0 @@ -package gockle - -import ( - "github.com/gocql/gocql" - "github.com/maraino/go-mock" -) - -// ColumnApplied is the name of a special column that has a bool that indicates -// whether a conditional statement was applied. -const ColumnApplied = "[applied]" - -// Batch is an ordered collection of CQL queries. -type Batch interface { - // Add adds the query for statement and arguments. - Add(statement string, arguments ...interface{}) - - // Exec executes the queries in the order they were added. - Exec() error - - // ExecTx executes the queries in the order they were added. It returns a slice - // of maps from columns to values, the maps corresponding to all the conditional - // queries, and ordered in the same relative order. The special column - // ColumnApplied has a bool that indicates whether the conditional statement was - // applied. If a conditional statement was not applied, the current values for - // the columns are put into the map. - ExecTx() ([]map[string]interface{}, error) -} - -var ( - _ Batch = BatchMock{} - _ Batch = batch{} -) - -// BatchKind is the kind of Batch. The choice of kind mostly affects performance. -type BatchKind byte - -// Kinds of batches. -const ( - // BatchLogged queries are atomic. Queries are only isolated within a single - // partition. - BatchLogged BatchKind = 0 - - // BatchUnlogged queries are not atomic. Atomic queries spanning multiple partitions cost performance. - BatchUnlogged BatchKind = 1 - - // BatchCounter queries update counters and are not idempotent. - BatchCounter BatchKind = 2 -) - -// BatchMock is a mock Batch. See github.com/maraino/go-mock. -type BatchMock struct { - mock.Mock -} - -// Add implements Batch. -func (m BatchMock) Add(statement string, arguments ...interface{}) { - m.Called(statement, arguments) -} - -// Exec implements Batch. -func (m BatchMock) Exec() error { - return m.Called().Error(0) -} - -// ExecTx implements Batch. -func (m BatchMock) ExecTx() ([]map[string]interface{}, error) { - var r = m.Called() - - return r.Get(0).([]map[string]interface{}), r.Error(1) -} - -type batch struct { - b *gocql.Batch - - s *gocql.Session -} - -func (b batch) Add(statement string, arguments ...interface{}) { - b.b.Query(statement, arguments...) -} - -func (b batch) Exec() error { - return b.s.ExecuteBatch(b.b) -} - -func (b batch) ExecTx() ([]map[string]interface{}, error) { - var m = map[string]interface{}{} - var a, i, err = b.s.MapExecuteBatchCAS(b.b, m) - - if err != nil { - return nil, err - } - - s, err := i.SliceMap() - - if err != nil { - return nil, err - } - - if err := i.Close(); err != nil { - return nil, err - } - - m[ColumnApplied] = a - s = append([]map[string]interface{}{m}, s...) - - return s, nil -} diff --git a/vendor/github.com/willfaught/gockle/doc.go b/vendor/github.com/willfaught/gockle/doc.go deleted file mode 100644 index 9e7e3d5f23..0000000000 --- a/vendor/github.com/willfaught/gockle/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package gockle simplifies and mocks github.com/gocql/gocql. It provides -// simple interfaces to insert, query, and mutate Cassandra data, as well as get -// basic keyspace and table metadata. -// -// The entry points are NewSession and NewSimpleSession. 
Call them to get a -// Session. Session interacts with the database. It executes queries and batched -// queries and iterates result rows. Closing the Session closes the underlying -// gocql.Session, including the one passed to NewSimpleSession. -// -// Mocks are provided for testing use of Batch, Iterator, and Session. -// -// Tx is short for transaction. -// -// The name gockle comes from a pronunciation of gocql. -package gockle diff --git a/vendor/github.com/willfaught/gockle/iterator.go b/vendor/github.com/willfaught/gockle/iterator.go deleted file mode 100644 index 85c9dd5ecd..0000000000 --- a/vendor/github.com/willfaught/gockle/iterator.go +++ /dev/null @@ -1,61 +0,0 @@ -package gockle - -import ( - "github.com/gocql/gocql" - "github.com/maraino/go-mock" -) - -// Iterator iterates CQL query result rows. -type Iterator interface { - // Close closes the Iterator. - Close() error - - // Scan puts the current result row in results and returns whether there are - // more result rows. - Scan(results ...interface{}) bool - - // ScanMap puts the current result row in results and returns whether there are - // more result rows. - ScanMap(results map[string]interface{}) bool -} - -var ( - _ Iterator = IteratorMock{} - _ Iterator = iterator{} -) - -// IteratorMock is a mock Iterator. See github.com/maraino/go-mock. -type IteratorMock struct { - mock.Mock -} - -// Close implements Iterator. -func (m IteratorMock) Close() error { - return m.Called().Error(0) -} - -// Scan implements Iterator. -func (m IteratorMock) Scan(results ...interface{}) bool { - return m.Called(results).Bool(0) -} - -// ScanMap implements Iterator. -func (m IteratorMock) ScanMap(results map[string]interface{}) bool { - return m.Called(results).Bool(0) -} - -type iterator struct { - i *gocql.Iter -} - -func (i iterator) Close() error { - return i.i.Close() -} - -func (i iterator) Scan(results ...interface{}) bool { - return i.i.Scan(results...) -} - -func (i iterator) ScanMap(results map[string]interface{}) bool { - return i.i.MapScan(results) -} diff --git a/vendor/github.com/willfaught/gockle/license.md b/vendor/github.com/willfaught/gockle/license.md deleted file mode 100644 index d88bd0618e..0000000000 --- a/vendor/github.com/willfaught/gockle/license.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Will Faught - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
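Given the doc.go summary above, a hedged sketch of basic gockle use; the host, keyspace, and table names are placeholders, while the methods come from the Session and Batch interfaces in this package:

    import "github.com/willfaught/gockle"

    func exampleSession() error {
        s, err := gockle.NewSimpleSession("127.0.0.1") // placeholder host
        if err != nil {
            return err
        }
        defer s.Close()

        if err := s.Exec("insert into ks.users (id, name) values (?, ?)", 1, "alice"); err != nil {
            return err
        }

        rows, err := s.ScanMapSlice("select id, name from ks.users")
        if err != nil {
            return err
        }
        _ = rows // each row is a map from column name to value

        b := s.Batch(gockle.BatchLogged)
        b.Add("insert into ks.users (id, name) values (?, ?)", 2, "bob")
        return b.Exec()
    }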
diff --git a/vendor/github.com/willfaught/gockle/readme.md b/vendor/github.com/willfaught/gockle/readme.md deleted file mode 100644 index ec870f7488..0000000000 --- a/vendor/github.com/willfaught/gockle/readme.md +++ /dev/null @@ -1,8 +0,0 @@ -# gockle - -[![Documentation](https://godoc.org/github.com/willfaught/gockle?status.svg)](https://godoc.org/github.com/willfaught/gockle) -[![Build](https://travis-ci.org/willfaught/gockle.svg?branch=master)](https://travis-ci.org/willfaught/gockle) -[![Report](https://goreportcard.com/badge/github.com/willfaught/gockle)](https://goreportcard.com/report/github.com/willfaught/gockle) -[![Test Coverage](https://coveralls.io/repos/github/willfaught/gockle/badge.svg?branch=master)](https://coveralls.io/github/willfaught/gockle?branch=master) - -*Note: Test coverage is low because there is no Cassandra database for the tests to use. Providing one yields 97.37% test coverage. Some code is uncovered because gocql cannot be mocked. This is one difficulty your code avoids by using gockle.* diff --git a/vendor/github.com/willfaught/gockle/session.go b/vendor/github.com/willfaught/gockle/session.go deleted file mode 100644 index a56336bce4..0000000000 --- a/vendor/github.com/willfaught/gockle/session.go +++ /dev/null @@ -1,228 +0,0 @@ -package gockle - -import ( - "fmt" - - "github.com/gocql/gocql" - "github.com/maraino/go-mock" -) - -func metadata(s *gocql.Session, keyspace string) (*gocql.KeyspaceMetadata, error) { - var m, err = s.KeyspaceMetadata(keyspace) - - if err != nil { - return nil, err - } - - if !m.DurableWrites && m.Name == keyspace && m.StrategyClass == "" && len(m.StrategyOptions) == 0 && len(m.Tables) == 0 { - return nil, fmt.Errorf("gockle: keyspace %v invalid", keyspace) - } - - return m, nil -} - -// Session is a Cassandra connection. The Query methods run CQL queries. The -// Columns and Tables methods provide simple metadata. -type Session interface { - // Batch returns a new Batch for the Session. - Batch(kind BatchKind) Batch - - // Close closes the Session. - Close() - - // Columns returns a map from column names to types for keyspace and table. - // Schema changes during a session are not reflected; you must open a new - // Session to observe them. - Columns(keyspace, table string) (map[string]gocql.TypeInfo, error) - - // Exec executes the query for statement and arguments. - Exec(statement string, arguments ...interface{}) error - - // Scan executes the query for statement and arguments and puts the first - // result row in results. - Scan(statement string, results []interface{}, arguments ...interface{}) error - - // ScanIterator executes the query for statement and arguments and returns an - // Iterator for the results. - ScanIterator(statement string, arguments ...interface{}) Iterator - - // ScanMap executes the query for statement and arguments and puts the first - // result row in results. - ScanMap(statement string, results map[string]interface{}, arguments ...interface{}) error - - // ScanMapSlice executes the query for statement and arguments and returns all - // the result rows. - ScanMapSlice(statement string, arguments ...interface{}) ([]map[string]interface{}, error) - - // ScanMapTx executes the query for statement and arguments as a lightweight - // transaction. If the query is not applied, it puts the current values for the - // conditional columns in results. It returns whether the query is applied. 
- ScanMapTx(statement string, results map[string]interface{}, arguments ...interface{}) (bool, error) - - // Tables returns the table names for keyspace. Schema changes during a session - // are not reflected; you must open a new Session to observe them. - Tables(keyspace string) ([]string, error) -} - -var ( - _ Session = SessionMock{} - _ Session = session{} -) - -// NewSession returns a new Session for s. -func NewSession(s *gocql.Session) Session { - return session{s: s} -} - -// NewSimpleSession returns a new Session for hosts. It uses native protocol -// version 4. -func NewSimpleSession(hosts ...string) (Session, error) { - var c = gocql.NewCluster(hosts...) - - c.ProtoVersion = 4 - - var s, err = c.CreateSession() - - if err != nil { - return nil, err - } - - return session{s: s}, nil -} - -// SessionMock is a mock Session. See github.com/maraino/go-mock. -type SessionMock struct { - mock.Mock -} - -// Batch implements Session. -func (m SessionMock) Batch(kind BatchKind) Batch { - return m.Called(kind).Get(0).(Batch) -} - -// Close implements Session. -func (m SessionMock) Close() { - m.Called() -} - -// Columns implements Session. -func (m SessionMock) Columns(keyspace, table string) (map[string]gocql.TypeInfo, error) { - var r = m.Called(keyspace, table) - - return r.Get(0).(map[string]gocql.TypeInfo), r.Error(1) -} - -// Exec implements Session. -func (m SessionMock) Exec(statement string, arguments ...interface{}) error { - return m.Called(statement, arguments).Error(0) -} - -// Scan implements Session. -func (m SessionMock) Scan(statement string, results []interface{}, arguments ...interface{}) error { - return m.Called(statement, results, arguments).Error(0) -} - -// ScanIterator implements Session. -func (m SessionMock) ScanIterator(statement string, arguments ...interface{}) Iterator { - return m.Called(statement, arguments).Get(0).(Iterator) -} - -// ScanMap implements Session. -func (m SessionMock) ScanMap(statement string, results map[string]interface{}, arguments ...interface{}) error { - return m.Called(statement, results, arguments).Error(0) -} - -// ScanMapSlice implements Session. -func (m SessionMock) ScanMapSlice(statement string, arguments ...interface{}) ([]map[string]interface{}, error) { - var r = m.Called(statement, arguments) - - return r.Get(0).([]map[string]interface{}), r.Error(1) -} - -// ScanMapTx implements Session. -func (m SessionMock) ScanMapTx(statement string, results map[string]interface{}, arguments ...interface{}) (bool, error) { - var r = m.Called(statement, results, arguments) - - return r.Bool(0), r.Error(1) -} - -// Tables implements Session. 
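A sketch of stubbing this mock in a test; the When/Return chaining comes from github.com/maraino/go-mock, and its exact signature is an assumption here rather than something this patch shows:

    import "github.com/willfaught/gockle"

    func exampleMock() {
        var m gockle.SessionMock
        // Assumed go-mock API: When(method, args...).Return(values...).
        m.When("Tables", "ks").Return([]string{"users"}, nil)

        ts, err := m.Tables("ks") // yields []string{"users"}, nil
        _, _ = ts, err
    }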
-func (m SessionMock) Tables(keyspace string) ([]string, error) { - var r = m.Called(keyspace) - - return r.Get(0).([]string), r.Error(1) -} - -type session struct { - s *gocql.Session -} - -func (s session) Batch(kind BatchKind) Batch { - return batch{b: s.s.NewBatch(gocql.BatchType(kind)), s: s.s} -} - -func (s session) Close() { - s.s.Close() -} - -func (s session) Columns(keyspace, table string) (map[string]gocql.TypeInfo, error) { - var m, err = metadata(s.s, keyspace) - - if err != nil { - return nil, err - } - - var t, ok = m.Tables[table] - - if !ok { - return nil, fmt.Errorf("gockle: table %v.%v invalid", keyspace, table) - } - - var types = map[string]gocql.TypeInfo{} - - for n, c := range t.Columns { - types[n] = c.Type - } - - return types, nil -} - -func (s session) Exec(statement string, arguments ...interface{}) error { - return s.s.Query(statement, arguments...).Exec() -} - -func (s session) Scan(statement string, results []interface{}, arguments ...interface{}) error { - return s.s.Query(statement, arguments...).Scan(results...) -} - -func (s session) ScanIterator(statement string, arguments ...interface{}) Iterator { - return iterator{i: s.s.Query(statement, arguments...).Iter()} -} - -func (s session) ScanMap(statement string, results map[string]interface{}, arguments ...interface{}) error { - return s.s.Query(statement, arguments...).MapScan(results) -} - -func (s session) ScanMapSlice(statement string, arguments ...interface{}) ([]map[string]interface{}, error) { - return s.s.Query(statement, arguments...).Iter().SliceMap() -} - -func (s session) ScanMapTx(statement string, results map[string]interface{}, arguments ...interface{}) (bool, error) { - return s.s.Query(statement, arguments...).MapScanCAS(results) -} - -func (s session) Tables(keyspace string) ([]string, error) { - var m, err = metadata(s.s, keyspace) - - if err != nil { - return nil, err - } - - var ts []string - - for t := range m.Tables { - ts = append(ts, t) - } - - return ts, nil -} diff --git a/vendor/gopkg.in/inf.v0/LICENSE b/vendor/gopkg.in/inf.v0/LICENSE deleted file mode 100644 index 87a5cede33..0000000000 --- a/vendor/gopkg.in/inf.v0/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go deleted file mode 100644 index 3b4afedf1a..0000000000 --- a/vendor/gopkg.in/inf.v0/dec.go +++ /dev/null @@ -1,615 +0,0 @@ -// Package inf (type inf.Dec) implements "infinite-precision" decimal -// arithmetic. -// "Infinite precision" describes two characteristics: practically unlimited -// precision for decimal number representation and no support for calculating -// with any specific fixed precision. -// (Although there is no practical limit on precision, inf.Dec can only -// represent finite decimals.) -// -// This package is currently in experimental stage and the API may change. -// -// This package does NOT support: -// - rounding to specific precisions (as opposed to specific decimal positions) -// - the notion of context (each rounding must be explicit) -// - NaN and Inf values, and distinguishing between positive and negative zero -// - conversions to and from float32/64 types -// -// Features considered for possible addition: -// + formatting options -// + Exp method -// + combined operations such as AddRound/MulAdd etc -// + exchanging data in decimal32/64/128 formats -// -package inf // import "gopkg.in/inf.v0" - -// TODO: -// - avoid excessive deep copying (quo and rounders) - -import ( - "fmt" - "io" - "math/big" - "strings" -) - -// A Dec represents a signed arbitrary-precision decimal. -// It is a combination of a sign, an arbitrary-precision integer coefficient -// value, and a signed fixed-precision exponent value. -// The sign and the coefficient value are handled together as a signed value -// and referred to as the unscaled value. -// (Positive and negative zero values are not distinguished.) -// Since the exponent is most commonly non-positive, it is handled in negated -// form and referred to as scale. -// -// The mathematical value of a Dec equals: -// -// unscaled * 10**(-scale) -// -// Note that different Dec representations may have equal mathematical values. -// -// unscaled scale String() -// ------------------------- -// 0 0 "0" -// 0 2 "0.00" -// 0 -2 "0" -// 1 0 "1" -// 100 2 "1.00" -// 10 0 "10" -// 1 -1 "10" -// -// The zero value for a Dec represents the value 0 with scale 0. -// -// Operations are typically performed through the *Dec type. -// The semantics of the assignment operation "=" for "bare" Dec values is -// undefined and should not be relied on. -// -// Methods are typically of the form: -// -// func (z *Dec) Op(x, y *Dec) *Dec -// -// and implement operations z = x Op y with the result as receiver; if it -// is one of the operands it may be overwritten (and its memory reused). -// To enable chaining of operations, the result is also returned. Methods -// returning a result other than *Dec take one of the operands as the receiver. -// -// A "bare" Quo method (quotient / division operation) is not provided, as the -// result is not always a finite decimal and thus in general cannot be -// represented as a Dec. 
-// Instead, in the common case when rounding is (potentially) necessary, -// QuoRound should be used with a Scale and a Rounder. -// QuoExact or QuoRound with RoundExact can be used in the special cases when it -// is known that the result is always a finite decimal. -// -type Dec struct { - unscaled big.Int - scale Scale -} - -// Scale represents the type used for the scale of a Dec. -type Scale int32 - -const scaleSize = 4 // bytes in a Scale value - -// Scaler represents a method for obtaining the scale to use for the result of -// an operation on x and y. -type scaler interface { - Scale(x *Dec, y *Dec) Scale -} - -var bigInt = [...]*big.Int{ - big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4), - big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9), - big.NewInt(10), -} - -var exp10cache [64]big.Int = func() [64]big.Int { - e10, e10i := [64]big.Int{}, bigInt[1] - for i, _ := range e10 { - e10[i].Set(e10i) - e10i = new(big.Int).Mul(e10i, bigInt[10]) - } - return e10 -}() - -// NewDec allocates and returns a new Dec set to the given int64 unscaled value -// and scale. -func NewDec(unscaled int64, scale Scale) *Dec { - return new(Dec).SetUnscaled(unscaled).SetScale(scale) -} - -// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled -// value and scale. -func NewDecBig(unscaled *big.Int, scale Scale) *Dec { - return new(Dec).SetUnscaledBig(unscaled).SetScale(scale) -} - -// Scale returns the scale of x. -func (x *Dec) Scale() Scale { - return x.scale -} - -// Unscaled returns the unscaled value of x for u and true for ok when the -// unscaled value can be represented as int64; otherwise it returns an undefined -// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid -// checking the validity of the value when the check is known to be redundant. -func (x *Dec) Unscaled() (u int64, ok bool) { - u = x.unscaled.Int64() - var i big.Int - ok = i.SetInt64(u).Cmp(&x.unscaled) == 0 - return -} - -// UnscaledBig returns the unscaled value of x as *big.Int. -func (x *Dec) UnscaledBig() *big.Int { - return &x.unscaled -} - -// SetScale sets the scale of z, with the unscaled value unchanged, and returns -// z. -// The mathematical value of the Dec changes as if it was multiplied by -// 10**(oldscale-scale). -func (z *Dec) SetScale(scale Scale) *Dec { - z.scale = scale - return z -} - -// SetUnscaled sets the unscaled value of z, with the scale unchanged, and -// returns z. -func (z *Dec) SetUnscaled(unscaled int64) *Dec { - z.unscaled.SetInt64(unscaled) - return z -} - -// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and -// returns z. -func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec { - z.unscaled.Set(unscaled) - return z -} - -// Set sets z to the value of x and returns z. -// It does nothing if z == x. -func (z *Dec) Set(x *Dec) *Dec { - if z != x { - z.SetUnscaledBig(x.UnscaledBig()) - z.SetScale(x.Scale()) - } - return z -} - -// Sign returns: -// -// -1 if x < 0 -// 0 if x == 0 -// +1 if x > 0 -// -func (x *Dec) Sign() int { - return x.UnscaledBig().Sign() -} - -// Neg sets z to -x and returns z. -func (z *Dec) Neg(x *Dec) *Dec { - z.SetScale(x.Scale()) - z.UnscaledBig().Neg(x.UnscaledBig()) - return z -} - -// Cmp compares x and y and returns: -// -// -1 if x < y -// 0 if x == y -// +1 if x > y -// -func (x *Dec) Cmp(y *Dec) int { - xx, yy := upscale(x, y) - return xx.UnscaledBig().Cmp(yy.UnscaledBig()) -} - -// Abs sets z to |x| (the absolute value of x) and returns z. 
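To make the unscaled/scale representation above concrete, a small worked sketch using only the constructors and operations defined in this file:

    import (
        "fmt"

        "gopkg.in/inf.v0"
    )

    func exampleDec() {
        x := inf.NewDec(125, 2) // 125 * 10^-2 = "1.25"
        y := inf.NewDec(3, 1)   // 3 * 10^-1   = "0.3"

        z := new(inf.Dec).Add(x, y) // scale = max(2, 1) = 2
        fmt.Println(z)              // "1.55"

        w := new(inf.Dec).Mul(x, y) // scale = 2 + 1 = 3
        fmt.Println(w)              // 375 * 10^-3 = "0.375"
    }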
-func (z *Dec) Abs(x *Dec) *Dec { - z.SetScale(x.Scale()) - z.UnscaledBig().Abs(x.UnscaledBig()) - return z -} - -// Add sets z to the sum x+y and returns z. -// The scale of z is the greater of the scales of x and y. -func (z *Dec) Add(x, y *Dec) *Dec { - xx, yy := upscale(x, y) - z.SetScale(xx.Scale()) - z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig()) - return z -} - -// Sub sets z to the difference x-y and returns z. -// The scale of z is the greater of the scales of x and y. -func (z *Dec) Sub(x, y *Dec) *Dec { - xx, yy := upscale(x, y) - z.SetScale(xx.Scale()) - z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig()) - return z -} - -// Mul sets z to the product x*y and returns z. -// The scale of z is the sum of the scales of x and y. -func (z *Dec) Mul(x, y *Dec) *Dec { - z.SetScale(x.Scale() + y.Scale()) - z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig()) - return z -} - -// Round sets z to the value of x rounded to Scale s using Rounder r, and -// returns z. -func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec { - return z.QuoRound(x, NewDec(1, 0), s, r) -} - -// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the -// specified scale. -// -// If the rounder is RoundExact but the result can not be expressed exactly at -// the specified scale, QuoRound returns nil, and the value of z is undefined. -// -// There is no corresponding Div method; the equivalent can be achieved through -// the choice of Rounder used. -// -func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec { - return z.quo(x, y, sclr{s}, r) -} - -func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec { - scl := s.Scale(x, y) - var zzz *Dec - if r.UseRemainder() { - zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int)) - zzz = r.Round(new(Dec), zz, rA, rB) - } else { - zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil) - zzz = r.Round(new(Dec), zz, nil, nil) - } - if zzz == nil { - return nil - } - return z.Set(zzz) -} - -// QuoExact sets z to the quotient x/y and returns z when x/y is a finite -// decimal. Otherwise it returns nil and the value of z is undefined. -// -// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is -// calculated so that the remainder will be zero whenever x/y is a finite -// decimal. -func (z *Dec) QuoExact(x, y *Dec) *Dec { - return z.quo(x, y, scaleQuoExact{}, RoundExact) -} - -// quoRem sets z to the quotient x/y with the scale s, and if useRem is true, -// it sets remNum and remDen to the numerator and denominator of the remainder. -// It returns z, remNum and remDen. -// -// The remainder is normalized to the range -1 < r < 1 to simplify rounding; -// that is, the results satisfy the following equation: -// -// x / y = z + (remNum/remDen) * 10**(-z.Scale()) -// -// See Rounder for more details about rounding. 
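Since 1/3 has no finite decimal form, the rounded quotient path looks like this (a sketch using the Rounders declared in this package):

    import (
        "fmt"

        "gopkg.in/inf.v0"
    )

    func exampleQuo() {
        x, y := inf.NewDec(1, 0), inf.NewDec(3, 0)

        z := new(inf.Dec).QuoRound(x, y, 4, inf.RoundHalfUp)
        fmt.Println(z) // "0.3333"

        // QuoExact returns nil: no finite scale represents 1/3 exactly.
        fmt.Println(new(inf.Dec).QuoExact(x, y) == nil) // true
    }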
-// -func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool, - remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) { - // difference (required adjustment) compared to "canonical" result scale - shift := s - (x.Scale() - y.Scale()) - // pointers to adjusted unscaled dividend and divisor - var ix, iy *big.Int - switch { - case shift > 0: - // increased scale: decimal-shift dividend left - ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift)) - iy = y.UnscaledBig() - case shift < 0: - // decreased scale: decimal-shift divisor left - ix = x.UnscaledBig() - iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift)) - default: - ix = x.UnscaledBig() - iy = y.UnscaledBig() - } - // save a copy of iy in case it to be overwritten with the result - iy2 := iy - if iy == z.UnscaledBig() { - iy2 = new(big.Int).Set(iy) - } - // set scale - z.SetScale(s) - // set unscaled - if useRem { - // Int division - _, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int)) - // set remainder - remNum.Set(intr) - remDen.Set(iy2) - } else { - z.UnscaledBig().Quo(ix, iy) - } - return z, remNum, remDen -} - -type sclr struct{ s Scale } - -func (s sclr) Scale(x, y *Dec) Scale { - return s.s -} - -type scaleQuoExact struct{} - -func (sqe scaleQuoExact) Scale(x, y *Dec) Scale { - rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig()) - f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5]) - var f10 Scale - if f2 > f5 { - f10 = Scale(f2) - } else { - f10 = Scale(f5) - } - return x.Scale() - y.Scale() + f10 -} - -func factor(n *big.Int, p *big.Int) int { - // could be improved for large factors - d, f := n, 0 - for { - dd, dm := new(big.Int).DivMod(d, p, new(big.Int)) - if dm.Sign() == 0 { - f++ - d = dd - } else { - break - } - } - return f -} - -func factor2(n *big.Int) int { - // could be improved for large factors - f := 0 - for ; n.Bit(f) == 0; f++ { - } - return f -} - -func upscale(a, b *Dec) (*Dec, *Dec) { - if a.Scale() == b.Scale() { - return a, b - } - if a.Scale() > b.Scale() { - bb := b.rescale(a.Scale()) - return a, bb - } - aa := a.rescale(b.Scale()) - return aa, b -} - -func exp10(x Scale) *big.Int { - if int(x) < len(exp10cache) { - return &exp10cache[int(x)] - } - return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil) -} - -func (x *Dec) rescale(newScale Scale) *Dec { - shift := newScale - x.Scale() - switch { - case shift < 0: - e := exp10(-shift) - return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale) - case shift > 0: - e := exp10(shift) - return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale) - } - return x -} - -var zeros = []byte("00000000000000000000000000000000" + - "00000000000000000000000000000000") -var lzeros = Scale(len(zeros)) - -func appendZeros(s []byte, n Scale) []byte { - for i := Scale(0); i < n; i += lzeros { - if n > i+lzeros { - s = append(s, zeros...) - } else { - s = append(s, zeros[0:n-i]...) - } - } - return s -} - -func (x *Dec) String() string { - if x == nil { - return "" - } - scale := x.Scale() - s := []byte(x.UnscaledBig().String()) - if scale <= 0 { - if scale != 0 && x.unscaled.Sign() != 0 { - s = appendZeros(s, -scale) - } - return string(s) - } - negbit := Scale(-((x.Sign() - 1) / 2)) - // scale > 0 - lens := Scale(len(s)) - if lens-negbit <= scale { - ss := make([]byte, 0, scale+2) - if negbit == 1 { - ss = append(ss, '-') - } - ss = append(ss, '0', '.') - ss = appendZeros(ss, scale-lens+negbit) - ss = append(ss, s[negbit:]...) 
- return string(ss) - } - // lens > scale - ss := make([]byte, 0, lens+1) - ss = append(ss, s[:lens-scale]...) - ss = append(ss, '.') - ss = append(ss, s[lens-scale:]...) - return string(ss) -} - -// Format is a support routine for fmt.Formatter. It accepts the decimal -// formats 'd' and 'f', and handles both equivalently. -// Width, precision, flags and bases 2, 8, 16 are not supported. -func (x *Dec) Format(s fmt.State, ch rune) { - if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' { - fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String()) - return - } - fmt.Fprintf(s, x.String()) -} - -func (z *Dec) scan(r io.RuneScanner) (*Dec, error) { - unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes - dp, dg := -1, -1 // indexes of decimal point, first digit -loop: - for { - ch, _, err := r.ReadRune() - if err == io.EOF { - break loop - } - if err != nil { - return nil, err - } - switch { - case ch == '+' || ch == '-': - if len(unscaled) > 0 || dp >= 0 { // must be first character - r.UnreadRune() - break loop - } - case ch == '.': - if dp >= 0 { - r.UnreadRune() - break loop - } - dp = len(unscaled) - continue // don't add to unscaled - case ch >= '0' && ch <= '9': - if dg == -1 { - dg = len(unscaled) - } - default: - r.UnreadRune() - break loop - } - unscaled = append(unscaled, byte(ch)) - } - if dg == -1 { - return nil, fmt.Errorf("no digits read") - } - if dp >= 0 { - z.SetScale(Scale(len(unscaled) - dp)) - } else { - z.SetScale(0) - } - _, ok := z.UnscaledBig().SetString(string(unscaled), 10) - if !ok { - return nil, fmt.Errorf("invalid decimal: %s", string(unscaled)) - } - return z, nil -} - -// SetString sets z to the value of s, interpreted as a decimal (base 10), -// and returns z and a boolean indicating success. The scale of z is the -// number of digits after the decimal point (including any trailing 0s), -// or 0 if there is no decimal point. If SetString fails, the value of z -// is undefined but the returned value is nil. -func (z *Dec) SetString(s string) (*Dec, bool) { - r := strings.NewReader(s) - _, err := z.scan(r) - if err != nil { - return nil, false - } - _, _, err = r.ReadRune() - if err != io.EOF { - return nil, false - } - // err == io.EOF => scan consumed all of s - return z, true -} - -// Scan is a support routine for fmt.Scanner; it sets z to the value of -// the scanned number. It accepts the decimal formats 'd' and 'f', and -// handles both equivalently. Bases 2, 8, 16 are not supported. -// The scale of z is the number of digits after the decimal point -// (including any trailing 0s), or 0 if there is no decimal point. -func (z *Dec) Scan(s fmt.ScanState, ch rune) error { - if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' { - return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch) - } - s.SkipSpace() - _, err := z.scan(s) - return err -} - -// Gob encoding version -const decGobVersion byte = 1 - -func scaleBytes(s Scale) []byte { - buf := make([]byte, scaleSize) - i := scaleSize - for j := 0; j < scaleSize; j++ { - i-- - buf[i] = byte(s) - s >>= 8 - } - return buf -} - -func scale(b []byte) (s Scale) { - for j := 0; j < scaleSize; j++ { - s <<= 8 - s |= Scale(b[j]) - } - return -} - -// GobEncode implements the gob.GobEncoder interface. -func (x *Dec) GobEncode() ([]byte, error) { - buf, err := x.UnscaledBig().GobEncode() - if err != nil { - return nil, err - } - buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion) - return buf, nil -} - -// GobDecode implements the gob.GobDecoder interface. 
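Scanning is the inverse of String above: the scale is taken from the count of digits after the decimal point, trailing zeros included, so values round-trip exactly. A short sketch using the API shown above:

    d := new(inf.Dec)
    if _, ok := d.SetString("-12.340"); ok {
    	fmt.Println(d.Scale()) // 3, the trailing zero is kept in the scale
    	fmt.Println(d)         // -12.340
    }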
-func (z *Dec) GobDecode(buf []byte) error { - if len(buf) == 0 { - return fmt.Errorf("Dec.GobDecode: no data") - } - b := buf[len(buf)-1] - if b != decGobVersion { - return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b) - } - l := len(buf) - scaleSize - 1 - err := z.UnscaledBig().GobDecode(buf[:l]) - if err != nil { - return err - } - z.SetScale(scale(buf[l : l+scaleSize])) - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface. -func (x *Dec) MarshalText() ([]byte, error) { - return []byte(x.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (z *Dec) UnmarshalText(data []byte) error { - _, ok := z.SetString(string(data)) - if !ok { - return fmt.Errorf("invalid inf.Dec") - } - return nil -} diff --git a/vendor/gopkg.in/inf.v0/rounder.go b/vendor/gopkg.in/inf.v0/rounder.go deleted file mode 100644 index 3a97ef529b..0000000000 --- a/vendor/gopkg.in/inf.v0/rounder.go +++ /dev/null @@ -1,145 +0,0 @@ -package inf - -import ( - "math/big" -) - -// Rounder represents a method for rounding the (possibly infinite decimal) -// result of a division to a finite Dec. It is used by Dec.Round() and -// Dec.Quo(). -// -// See the Example for results of using each Rounder with some sample values. -// -type Rounder rounder - -// See http://speleotrove.com/decimal/damodel.html#refround for more detailed -// definitions of these rounding modes. -var ( - RoundDown Rounder // towards 0 - RoundUp Rounder // away from 0 - RoundFloor Rounder // towards -infinity - RoundCeil Rounder // towards +infinity - RoundHalfDown Rounder // to nearest; towards 0 if same distance - RoundHalfUp Rounder // to nearest; away from 0 if same distance - RoundHalfEven Rounder // to nearest; even last digit if same distance -) - -// RoundExact is to be used in the case when rounding is not necessary. -// When used with Quo or Round, it returns the result verbatim when it can be -// expressed exactly with the given precision, and it returns nil otherwise. -// QuoExact is a shorthand for using Quo with RoundExact. -var RoundExact Rounder - -type rounder interface { - - // When UseRemainder() returns true, the Round() method is passed the - // remainder of the division, expressed as the numerator and denominator of - // a rational. - UseRemainder() bool - - // Round sets the rounded value of a quotient to z, and returns z. - // quo is rounded down (truncated towards zero) to the scale obtained from - // the Scaler in Quo(). - // - // When the remainder is not used, remNum and remDen are nil. - // When used, the remainder is normalized between -1 and 1; that is: - // - // -|remDen| < remNum < |remDen| - // - // remDen has the same sign as y, and remNum is zero or has the same sign - // as x. 
- Round(z, quo *Dec, remNum, remDen *big.Int) *Dec -} - -type rndr struct { - useRem bool - round func(z, quo *Dec, remNum, remDen *big.Int) *Dec -} - -func (r rndr) UseRemainder() bool { - return r.useRem -} - -func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec { - return r.round(z, quo, remNum, remDen) -} - -var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)} - -func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec { - return func(z, q *Dec, rA, rB *big.Int) *Dec { - z.Set(q) - brA, brB := rA.BitLen(), rB.BitLen() - if brA < brB-1 { - // brA < brB-1 => |rA| < |rB/2| - return z - } - roundUp := false - srA, srB := rA.Sign(), rB.Sign() - s := srA * srB - if brA == brB-1 { - rA2 := new(big.Int).Lsh(rA, 1) - if s < 0 { - rA2.Neg(rA2) - } - roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0)) - } else { - // brA > brB-1 => |rA| > |rB/2| - roundUp = true - } - if roundUp { - z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1]) - } - return z - } -} - -func init() { - RoundExact = rndr{true, - func(z, q *Dec, rA, rB *big.Int) *Dec { - if rA.Sign() != 0 { - return nil - } - return z.Set(q) - }} - RoundDown = rndr{false, - func(z, q *Dec, rA, rB *big.Int) *Dec { - return z.Set(q) - }} - RoundUp = rndr{true, - func(z, q *Dec, rA, rB *big.Int) *Dec { - z.Set(q) - if rA.Sign() != 0 { - z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1]) - } - return z - }} - RoundFloor = rndr{true, - func(z, q *Dec, rA, rB *big.Int) *Dec { - z.Set(q) - if rA.Sign()*rB.Sign() < 0 { - z.UnscaledBig().Add(z.UnscaledBig(), intSign[0]) - } - return z - }} - RoundCeil = rndr{true, - func(z, q *Dec, rA, rB *big.Int) *Dec { - z.Set(q) - if rA.Sign()*rB.Sign() > 0 { - z.UnscaledBig().Add(z.UnscaledBig(), intSign[2]) - } - return z - }} - RoundHalfDown = rndr{true, roundHalf( - func(c int, odd uint) bool { - return c > 0 - })} - RoundHalfUp = rndr{true, roundHalf( - func(c int, odd uint) bool { - return c >= 0 - })} - RoundHalfEven = rndr{true, roundHalf( - func(c int, odd uint) bool { - return c > 0 || c == 0 && odd == 1 - })} -} From 200fa9acbead0d00312979a1a051cf7cb101a7f3 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 23 Jul 2018 14:35:01 +0200 Subject: [PATCH 043/174] rest handlers for acl Signed-off-by: Vladimir Lavor --- plugins/rest/plugin_impl_rest.go | 21 +++-- plugins/vpp/aclplugin/acl_config.go | 4 +- plugins/vpp/aclplugin/data_resync.go | 12 +-- .../vpp/aclplugin/vppcalls/api_vppcalls.go | 8 +- .../vpp/aclplugin/vppcalls/dump_vppcalls.go | 93 +++++++++---------- plugins/vpp/model/acl/keys_agent_acl.go | 35 ++++++- plugins/vpp/model/api_version.go | 18 ++++ 7 files changed, 125 insertions(+), 66 deletions(-) create mode 100644 plugins/vpp/model/api_version.go diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 52fd42df19..2a642df71d 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -20,12 +20,19 @@ import ( "github.com/ligato/cn-infra/flavors/local" "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/vpp-agent/plugins/govppmux" + "github.com/ligato/vpp-agent/plugins/vpp/model/acl" ) const ( swIndexVarName = "swindex" ) +// REST api methods +const ( + POST = "POST" + GET = "GET" +) + // Plugin registers Rest Plugin type Plugin struct { Deps @@ -64,6 +71,14 @@ func (plugin *Plugin) Init() (err error) { func (plugin *Plugin) AfterInit() (err error) { plugin.Log.Debug("REST API Plugin is up and running") + // Access lists + 
plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPKey(), plugin.ipACLPostHandler, POST) + plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPKey(), plugin.ipACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPExampleKey(), plugin.exampleIpACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPKey(), plugin.macipACLPostHandler, POST) + plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPKey(), plugin.macipACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPExampleKey(), plugin.exampleMacIpACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler("/interfaces", plugin.interfacesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/bridgedomains", plugin.bridgeDomainsGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/bridgedomainids", plugin.bridgeDomainIdsGetHandler, "GET") @@ -73,12 +88,6 @@ func (plugin *Plugin) AfterInit() (err error) { plugin.HTTPHandlers.RegisterHTTPHandler("/staticroutes", plugin.staticRoutesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler(fmt.Sprintf("/acl/interface/{%s:[0-9]+}", swIndexVarName), plugin.interfaceACLGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/acl/ip", plugin.ipACLPostHandler, "POST") - plugin.HTTPHandlers.RegisterHTTPHandler("/acl/ip", plugin.ipACLGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/acl/ip/example", plugin.exampleIpACLGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/acl/macip", plugin.macipACLPostHandler, "POST") - plugin.HTTPHandlers.RegisterHTTPHandler("/acl/macip", plugin.macipACLGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/acl/macip/example", plugin.exampleMacIpACLGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/command", plugin.commandHandler, "POST") plugin.HTTPHandlers.RegisterHTTPHandler("/telemetry", plugin.telemetryHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/telemetry/memory", plugin.telemetryMemoryHandler, "GET") diff --git a/plugins/vpp/aclplugin/acl_config.go b/plugins/vpp/aclplugin/acl_config.go index 6738ca8682..09024a23c7 100644 --- a/plugins/vpp/aclplugin/acl_config.go +++ b/plugins/vpp/aclplugin/acl_config.go @@ -356,7 +356,7 @@ func (plugin *ACLConfigurator) DumpIPACL() (acls []*acl.AccessLists_Acl, err err return nil, err } for _, aclWithIndex := range aclsWithIndex { - acls = append(acls, aclWithIndex.ACLDetails) + acls = append(acls, aclWithIndex.Acl) } return acls, nil } @@ -369,7 +369,7 @@ func (plugin *ACLConfigurator) DumpMACIPACL() (acls []*acl.AccessLists_Acl, err return nil, err } for _, aclWithIndex := range aclsWithIndex { - acls = append(acls, aclWithIndex.ACLDetails) + acls = append(acls, aclWithIndex.Acl) } return acls, nil } diff --git a/plugins/vpp/aclplugin/data_resync.go b/plugins/vpp/aclplugin/data_resync.go index c4530b28c6..1befa59351 100644 --- a/plugins/vpp/aclplugin/data_resync.go +++ b/plugins/vpp/aclplugin/data_resync.go @@ -49,28 +49,28 @@ func (plugin *ACLConfigurator) Resync(nbACLs []*acl.AccessLists_Acl) error { // ACL with IP-type rules uses different binary call to create/remove than MACIP-type. 
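An ACL's type is decided from its proto model by probing the match of the first rule. Condensed into a hypothetical helper (the resync code below performs the same check inline):

    // isIPAcl reports whether an ACL carries IP-type rules rather than MACIP-type.
    func isIPAcl(a *acl.AccessLists_Acl) bool {
    	return len(a.Rules) > 0 && a.Rules[0].GetMatch().GetIpRule() != nil
    }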
// Check what type of rules is in the ACL - ipRulesExist := len(vppIpACL.ACLDetails.Rules) > 0 && vppIpACL.ACLDetails.Rules[0].GetMatch().GetIpRule() != nil + ipRulesExist := len(vppIpACL.Acl.Rules) > 0 && vppIpACL.Acl.Rules[0].GetMatch().GetIpRule() != nil if ipRulesExist { - if err := plugin.aclHandler.DeleteIPAcl(vppIpACL.Identifier.ACLIndex); err != nil { + if err := plugin.aclHandler.DeleteIPAcl(vppIpACL.Meta.Index); err != nil { plugin.log.Error(err) return err } // Unregister. - plugin.l3l4AclIndexes.UnregisterName(vppIpACL.ACLDetails.AclName) + plugin.l3l4AclIndexes.UnregisterName(vppIpACL.Acl.AclName) continue } } for _, vppMacIpACL := range vppMacIpACLs { - ipRulesExist := len(vppMacIpACL.ACLDetails.Rules) > 0 && vppMacIpACL.ACLDetails.Rules[0].GetMatch().GetMacipRule() != nil + ipRulesExist := len(vppMacIpACL.Acl.Rules) > 0 && vppMacIpACL.Acl.Rules[0].GetMatch().GetMacipRule() != nil if ipRulesExist { - if err := plugin.aclHandler.DeleteMacIPAcl(vppMacIpACL.Identifier.ACLIndex); err != nil { + if err := plugin.aclHandler.DeleteMacIPAcl(vppMacIpACL.Meta.Index); err != nil { plugin.log.Error(err) return err } // Unregister. - plugin.l2AclIndexes.UnregisterName(vppMacIpACL.ACLDetails.AclName) + plugin.l2AclIndexes.UnregisterName(vppMacIpACL.Acl.AclName) continue } } diff --git a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go index fcd9a228ed..4d7418b299 100644 --- a/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/api_vppcalls.go @@ -61,17 +61,17 @@ type AclVppRead interface { // GetAclPluginVersion returns version of the VPP ACL plugin GetAclPluginVersion() (string, error) // DumpIPACL returns all IP-type ACLs - DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error) + DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*AclDetails, error) // DumpIPACL returns all MACIP-type ACLs - DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error) + DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*AclDetails, error) // DumpACLInterfaces returns a map of IP ACL indices with interfaces DumpIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex) (map[uint32]*acl.AccessLists_Acl_Interfaces, error) // DumpMACIPACLInterfaces returns a map of MACIP ACL indices with interfaces DumpMACIPACLInterfaces(indices []uint32, swIfIndices ifaceidx.SwIfIndex) (map[uint32]*acl.AccessLists_Acl_Interfaces, error) // DumpIPAcls returns a list of all configured ACLs with IP-type ruleData. - DumpIPAcls() (map[ACLIdentifier][]aclapi.ACLRule, error) + DumpIPAcls() (map[AclMeta][]aclapi.ACLRule, error) // DumpMacIPAcls returns a list of all configured ACL with IPMAC-type ruleData. 
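Read together, the dump methods hand callers the proto-modelled ACL and the VPP metadata in a single struct. A minimal consumption sketch, assuming an initialized handler satisfying AclVppRead and an interface index mapping (both names are hypothetical):

    details, err := aclHandler.DumpIPACL(swIfIndexes)
    if err != nil {
    	return err
    }
    for _, d := range details {
    	// d.Acl is the northbound model; d.Meta carries the VPP-side index and tag.
    	log.Infof("ACL %q: index=%d, rules=%d", d.Acl.AclName, d.Meta.Index, len(d.Acl.Rules))
    }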
- DumpMacIPAcls() (map[ACLIdentifier][]aclapi.MacipACLRule, error) + DumpMacIPAcls() (map[AclMeta][]aclapi.MacipACLRule, error) // DumpInterfaceAcls finds interface in VPP and returns its ACL configuration DumpInterfaceIPAcls(swIndex uint32) (acl.AccessLists, error) // DumpInterfaceMACIPAcls finds interface in VPP and returns its MACIP ACL configuration diff --git a/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go index 1e1ac07168..0315207036 100644 --- a/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go +++ b/plugins/vpp/aclplugin/vppcalls/dump_vppcalls.go @@ -34,17 +34,16 @@ const ( ICMPv6Proto = 58 ) -// ACLIdentifier contains fields for ACL index and Tag (used as a name in the configuration) -type ACLIdentifier struct { - ACLIndex uint32 `json:"acl_index"` - Tag string `json:"acl_tag"` +// AclDetails is combination of proto-modelled ACL data and VPP provided metadata +type AclDetails struct { + Acl *acl.AccessLists_Acl `json:"acl"` + Meta *AclMeta `json:"acl_meta"` } -// ACLEntry is cumulative object with ACL identification and details with all ruleData and -// interfaces belonging to the ACL -type ACLEntry struct { - Identifier *ACLIdentifier - ACLDetails *acl.AccessLists_Acl `json:"acl_details"` +// AclMeta holds VPP-specific metadata +type AclMeta struct { + Index uint32 `json:"acl_index"` + Tag string `json:"acl_tag"` } // ACLToInterface is definition of interface and all ACLs which are bound to @@ -55,8 +54,8 @@ type ACLToInterface struct { EgressACL []uint32 } -func (handler *aclVppHandler) DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error) { - ruleIPData := make(map[ACLIdentifier][]*acl.AccessLists_Acl_Rule) +func (handler *aclVppHandler) DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*AclDetails, error) { + ruleIPData := make(map[AclMeta][]*acl.AccessLists_Acl_Rule) // get all ACLs with IP ruleData IPRuleACLs, err := handler.DumpIPAcls() @@ -85,7 +84,7 @@ func (handler *aclVppHandler) DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLE // Prepare separate list of all active ACL indices on the VPP var indices []uint32 for identifier := range ruleIPData { - indices = append(indices, identifier.ACLIndex) + indices = append(indices, identifier.Index) } // Get all ACL indices with ingress and egress interfaces @@ -94,18 +93,18 @@ func (handler *aclVppHandler) DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLE return nil, err } - var ACLs []*ACLEntry + var ACLs []*AclDetails // Build a list of ACL ruleData with ruleData, interfaces, index and tag (name) for identifier, rules := range ruleIPData { - ACLs = append(ACLs, &ACLEntry{ - Identifier: &ACLIdentifier{ - ACLIndex: identifier.ACLIndex, - Tag: identifier.Tag, - }, - ACLDetails: &acl.AccessLists_Acl{ + ACLs = append(ACLs, &AclDetails{ + Acl: &acl.AccessLists_Acl{ AclName: identifier.Tag, Rules: rules, - Interfaces: interfaceData[identifier.ACLIndex], + Interfaces: interfaceData[identifier.Index], + }, + Meta: &AclMeta{ + Index: identifier.Index, + Tag: identifier.Tag, }, }) } @@ -113,9 +112,9 @@ func (handler *aclVppHandler) DumpIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLE return ACLs, wasErr } -func (handler *aclVppHandler) DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*ACLEntry, error) { +func (handler *aclVppHandler) DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*AclDetails, error) { - ruleMACIPData := make(map[ACLIdentifier][]*acl.AccessLists_Acl_Rule) + ruleMACIPData := make(map[AclMeta][]*acl.AccessLists_Acl_Rule) // get all ACLs with MACIP ruleData MACIPRuleACLs, 
err := handler.DumpMacIPAcls() @@ -125,7 +124,7 @@ func (handler *aclVppHandler) DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*A // resolve MACIP rules for every ACL var wasErr error - for identifier, MACIPRules := range MACIPRuleACLs { + for metadata, MACIPRules := range MACIPRuleACLs { var rulesDetails []*acl.AccessLists_Acl_Rule if len(MACIPRules) > 0 { @@ -137,13 +136,13 @@ func (handler *aclVppHandler) DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*A rulesDetails = append(rulesDetails, ruleDetails) } } - ruleMACIPData[identifier] = rulesDetails + ruleMACIPData[metadata] = rulesDetails } // Prepare separate list of all active ACL indices on the VPP var indices []uint32 for identifier := range ruleMACIPData { - indices = append(indices, identifier.ACLIndex) + indices = append(indices, identifier.Index) } // Get all ACL indices with ingress and egress interfaces @@ -152,18 +151,18 @@ func (handler *aclVppHandler) DumpMACIPACL(swIfIndices ifaceidx.SwIfIndex) ([]*A return nil, err } - var ACLs []*ACLEntry + var ACLs []*AclDetails // Build a list of ACL ruleData with ruleData, interfaces, index and tag (name) - for identifier, rules := range ruleMACIPData { - ACLs = append(ACLs, &ACLEntry{ - Identifier: &ACLIdentifier{ - ACLIndex: identifier.ACLIndex, - Tag: identifier.Tag, - }, - ACLDetails: &acl.AccessLists_Acl{ - AclName: identifier.Tag, + for metadata, rules := range ruleMACIPData { + ACLs = append(ACLs, &AclDetails{ + Acl: &acl.AccessLists_Acl{ + AclName: metadata.Tag, Rules: rules, - Interfaces: interfaceData[identifier.ACLIndex], + Interfaces: interfaceData[metadata.Index], + }, + Meta: &AclMeta{ + Index: metadata.Index, + Tag: metadata.Tag, }, }) } @@ -310,12 +309,12 @@ func (handler *aclVppHandler) DumpMACIPACLInterfaces(indices []uint32, swIfIndic return aclsWithInterfaces, wasErr } -func (handler *aclVppHandler) DumpIPAcls() (map[ACLIdentifier][]acl_api.ACLRule, error) { +func (handler *aclVppHandler) DumpIPAcls() (map[AclMeta][]acl_api.ACLRule, error) { defer func(start time.Time) { handler.stopwatch.TimeLog(acl_api.ACLDump{}).LogTimeEntry(time.Since(start)) }(time.Now()) - aclIPRules := make(map[ACLIdentifier][]acl_api.ACLRule) + aclIPRules := make(map[AclMeta][]acl_api.ACLRule) var wasErr error req := &acl_api.ACLDump{} @@ -331,23 +330,23 @@ func (handler *aclVppHandler) DumpIPAcls() (map[ACLIdentifier][]acl_api.ACLRule, break } - identifier := ACLIdentifier{ - ACLIndex: msg.ACLIndex, - Tag: string(bytes.SplitN(msg.Tag, []byte{0x00}, 2)[0]), + metadata := AclMeta{ + Index: msg.ACLIndex, + Tag: string(bytes.SplitN(msg.Tag, []byte{0x00}, 2)[0]), } - aclIPRules[identifier] = msg.R + aclIPRules[metadata] = msg.R } return aclIPRules, wasErr } -func (handler *aclVppHandler) DumpMacIPAcls() (map[ACLIdentifier][]acl_api.MacipACLRule, error) { +func (handler *aclVppHandler) DumpMacIPAcls() (map[AclMeta][]acl_api.MacipACLRule, error) { defer func(start time.Time) { handler.stopwatch.TimeLog(acl_api.MacipACLDump{}).LogTimeEntry(time.Since(start)) }(time.Now()) - aclMACIPRules := make(map[ACLIdentifier][]acl_api.MacipACLRule) + aclMACIPRules := make(map[AclMeta][]acl_api.MacipACLRule) var wasErr error req := &acl_api.MacipACLDump{} @@ -363,12 +362,12 @@ func (handler *aclVppHandler) DumpMacIPAcls() (map[ACLIdentifier][]acl_api.Macip break } - identifier := ACLIdentifier{ - ACLIndex: msg.ACLIndex, - Tag: string(bytes.SplitN(msg.Tag, []byte{0x00}, 2)[0]), + metadata := AclMeta{ + Index: msg.ACLIndex, + Tag: string(bytes.SplitN(msg.Tag, []byte{0x00}, 2)[0]), } - 
aclMACIPRules[identifier] = msg.R + aclMACIPRules[metadata] = msg.R } return aclMACIPRules, wasErr } diff --git a/plugins/vpp/model/acl/keys_agent_acl.go b/plugins/vpp/model/acl/keys_agent_acl.go index f15233058c..7299e8c547 100644 --- a/plugins/vpp/model/acl/keys_agent_acl.go +++ b/plugins/vpp/model/acl/keys_agent_acl.go @@ -14,7 +14,20 @@ package acl -const aclPrefix = "vpp/config/v1/acl/" +import "github.com/ligato/vpp-agent/plugins/vpp/model" + +const ( + // DB key prefix + aclPrefix = "vpp/config" + model.ProtoApiVersion + "acl/" + // REST Acl IP prefix + restAclIP = model.ProtoApiVersion + "acl/ip" + // REST Acl IP example prefix + restAclIPExample = model.ProtoApiVersion + "acl/ip/example" + // REST Acl MACIP prefix + restAclMACIP = model.ProtoApiVersion + "acl/macip" + // REST Acl MACIP example prefix + restAclMACIPExample = model.ProtoApiVersion + "acl/macip/example" +) // KeyPrefix returns the prefix used in ETCD to store vpp ACLs config. func KeyPrefix() string { @@ -26,3 +39,23 @@ func KeyPrefix() string { func Key(aclName string) string { return aclPrefix + aclName } + +// RestIPKey returns prefix used in REST to dump ACL IP config +func RestIPKey() string { + return restAclIP +} + +// RestIPExampleKey returns prefix used in REST to dump ACL IP example config +func RestIPExampleKey() string { + return restAclIPExample +} + +// RestMACIPKey returns prefix used in REST to dump ACL MACIP config +func RestMACIPKey() string { + return restAclMACIP +} + +// RestMACIPExampleKey returns prefix used in REST to dump ACL MACIP example config +func RestMACIPExampleKey() string { + return restAclMACIPExample +} diff --git a/plugins/vpp/model/api_version.go b/plugins/vpp/model/api_version.go new file mode 100644 index 0000000000..62d7083929 --- /dev/null +++ b/plugins/vpp/model/api_version.go @@ -0,0 +1,18 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
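Since every prefix is now assembled from ProtoApiVersion (the "/v1/" constant completed in the new api_version.go just below), DB keys and REST paths stay in lockstep. The helpers evaluate to, for example:

    fmt.Println(acl.KeyPrefix())    // vpp/config/v1/acl/
    fmt.Println(acl.Key("acl1"))    // vpp/config/v1/acl/acl1
    fmt.Println(acl.RestIPKey())    // /v1/acl/ip
    fmt.Println(acl.RestMACIPKey()) // /v1/acl/macip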
+ +package model + +// ProtoApiVersion is the current version of the northbound API +const ProtoApiVersion = "/v1/" From 8bd9ac5c8d8b1c5a6dc74c71c7f48eab3127692f Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Tue, 24 Jul 2018 15:28:20 +0200 Subject: [PATCH 044/174] rest handlers for interfaces Signed-off-by: Vladimir Lavor --- plugins/rest/plugin_impl_rest.go | 10 +- plugins/rest/rest_handlers.go | 235 ++++++++++++++++++ plugins/vpp/error_status.go | 6 +- .../model/interfaces/keys_agent_interfaces.go | 75 +++++- plugins/vpp/watch_events.go | 2 +- 5 files changed, 311 insertions(+), 17 deletions(-) diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 2a642df71d..b5d396c854 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -21,6 +21,7 @@ import ( "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/vpp-agent/plugins/govppmux" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" + "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) const ( @@ -78,8 +79,15 @@ func (plugin *Plugin) AfterInit() (err error) { plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPKey(), plugin.macipACLPostHandler, POST) plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPKey(), plugin.macipACLGetHandler, GET) plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPExampleKey(), plugin.exampleMacIpACLGetHandler, GET) + // Interfaces + plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestInterfaceKey(), plugin.interfacesGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestLoopbackKey(), plugin.loopbackGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestEthernetKey(), plugin.ethernetGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestMemifKey(), plugin.memifGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestTapKey(), plugin.tapGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestAfPAcketKey(), plugin.afpacketGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestVxLanKey(), plugin.vxLanTypeGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler("/interfaces", plugin.interfacesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/bridgedomains", plugin.bridgeDomainsGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/bridgedomainids", plugin.bridgeDomainIdsGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/l2fibs", plugin.fibTableEntriesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/xconnectpairs", plugin.xconnectPairsGetHandler, "GET") diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index cdd741d9d2..36fd13efe6 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -34,6 +34,7 @@ import ( l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" + "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" ) // interfacesGetHandler - used to get list of all interfaces @@ -69,6 +70,240 @@ func (plugin *Plugin) interfacesGetHandler(formatter *render.Render) http.Handle } } +// loopbackGetHandler - used to get list of all loopback interfaces +func (plugin *Plugin) loopbackGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + + plugin.Log.Debug("Getting list of all loopback interfaces") + + // create an API channel + ch, err := plugin.GoVppmux.NewAPIChannel() + if err != nil { + plugin.Log.Errorf("Error creating channel: %v", err) + 
formatter.JSON(w, http.StatusInternalServerError, err) + return + } + defer ch.Close() + + ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := ifHandler.DumpInterfaces() + if err != nil { + plugin.Log.Errorf("Error: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + // Keep only loopback type interfaces + for ifKey, ifConfig := range res { + if ifConfig.Type != interfaces.InterfaceType_SOFTWARE_LOOPBACK { + delete(res, ifKey) + } + } + + plugin.Log.Debug(res) + formatter.JSON(w, http.StatusOK, res) + } +} + +// ethernetGetHandler - used to get list of all ethernet interfaces +func (plugin *Plugin) ethernetGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + + plugin.Log.Debug("Getting list of all ethernet interfaces") + + // create an API channel + ch, err := plugin.GoVppmux.NewAPIChannel() + if err != nil { + plugin.Log.Errorf("Error creating channel: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + defer ch.Close() + + ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := ifHandler.DumpInterfaces() + if err != nil { + plugin.Log.Errorf("Error: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + // Keep only ethernet type interfaces + for ifKey, ifConfig := range res { + if ifConfig.Type != interfaces.InterfaceType_ETHERNET_CSMACD { + delete(res, ifKey) + } + } + + plugin.Log.Debug(res) + formatter.JSON(w, http.StatusOK, res) + } +} + +// memifGetHandler - used to get list of all memif interfaces +func (plugin *Plugin) memifGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + + plugin.Log.Debug("Getting list of all memif interfaces") + + // create an API channel + ch, err := plugin.GoVppmux.NewAPIChannel() + if err != nil { + plugin.Log.Errorf("Error creating channel: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + defer ch.Close() + + ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := ifHandler.DumpInterfaces() + if err != nil { + plugin.Log.Errorf("Error: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + // Keep only memif type interfaces + for ifKey, ifConfig := range res { + if ifConfig.Type != interfaces.InterfaceType_MEMORY_INTERFACE { + delete(res, ifKey) + } + } + + plugin.Log.Debug(res) + formatter.JSON(w, http.StatusOK, res) + } +} + +// tapGetHandler - used to get list of all tap interfaces +func (plugin *Plugin) tapGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + + plugin.Log.Debug("Getting list of all tap interfaces") + + // create an API channel + ch, err := plugin.GoVppmux.NewAPIChannel() + if err != nil { + plugin.Log.Errorf("Error creating channel: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + defer ch.Close() + + ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + if err != nil { + 
plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := ifHandler.DumpInterfaces() + if err != nil { + plugin.Log.Errorf("Error: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + // Keep only loopback type interfaces + for ifKey, ifConfig := range res { + if ifConfig.Type != interfaces.InterfaceType_TAP_INTERFACE { + delete(res, ifKey) + } + } + + plugin.Log.Debug(res) + formatter.JSON(w, http.StatusOK, res) + } +} + +// interfacesGetHandler - used to get list of all interfaces +func (plugin *Plugin) afpacketGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + + plugin.Log.Debug("Getting list of all interfaces") + + // create an API channel + ch, err := plugin.GoVppmux.NewAPIChannel() + if err != nil { + plugin.Log.Errorf("Error creating channel: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + defer ch.Close() + + ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := ifHandler.DumpInterfaces() + if err != nil { + plugin.Log.Errorf("Error: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + // Keep only loopback type interfaces + for ifKey, ifConfig := range res { + if ifConfig.Type != interfaces.InterfaceType_AF_PACKET_INTERFACE { + delete(res, ifKey) + } + } + + plugin.Log.Debug(res) + formatter.JSON(w, http.StatusOK, res) + } +} + +// interfacesGetHandler - used to get list of all interfaces +func (plugin *Plugin) vxLanTypeGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + + plugin.Log.Debug("Getting list of all interfaces") + + // create an API channel + ch, err := plugin.GoVppmux.NewAPIChannel() + if err != nil { + plugin.Log.Errorf("Error creating channel: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + defer ch.Close() + + ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) + if err != nil { + plugin.Log.Errorf("Error creating VPP handler: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + res, err := ifHandler.DumpInterfaces() + if err != nil { + plugin.Log.Errorf("Error: %v", err) + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + // Keep only loopback type interfaces + for ifKey, ifConfig := range res { + if ifConfig.Type != interfaces.InterfaceType_VXLAN_TUNNEL { + delete(res, ifKey) + } + } + + plugin.Log.Debug(res) + formatter.JSON(w, http.StatusOK, res) + } +} + // bridgeDomainIdsGetHandler - used to get list of all bridge domain ids func (plugin *Plugin) bridgeDomainIdsGetHandler(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { diff --git a/plugins/vpp/error_status.go b/plugins/vpp/error_status.go index 98d59c7f4b..48768a96a6 100644 --- a/plugins/vpp/error_status.go +++ b/plugins/vpp/error_status.go @@ -250,7 +250,7 @@ func (plugin *Plugin) removeErrorLog(key string) { return } - if prefix == interfaces.InterfacePrefix { + if prefix == interfaces.InterfaceKeyPrefix() { key := interfaces.InterfaceErrorKey(name) plugin.Publish.Put(key, nil) plugin.Log.Infof("Error status log for interface %v cleared", name) @@ -269,8 +269,8 @@ func (plugin *Plugin) 
removeOldestErrorLogEntry(key string) { var name string var metaData interface{} var exists bool - if strings.HasPrefix(key, interfaces.IfErrorPrefix) { - name = strings.Replace(key, interfaces.IfErrorPrefix, "", 1) + if strings.HasPrefix(key, interfaces.InterfaceErrorPrefix()) { + name = strings.Replace(key, interfaces.InterfaceErrorPrefix(), "", 1) _, metaData, exists = plugin.errorIndexes.LookupIdx(name) } else if strings.HasPrefix(key, l2.BdErrPrefix) { name = strings.Replace(key, l2.BdErrPrefix, "", 1) diff --git a/plugins/vpp/model/interfaces/keys_agent_interfaces.go b/plugins/vpp/model/interfaces/keys_agent_interfaces.go index 59d50106ca..bc254af361 100644 --- a/plugins/vpp/model/interfaces/keys_agent_interfaces.go +++ b/plugins/vpp/model/interfaces/keys_agent_interfaces.go @@ -17,20 +17,36 @@ package interfaces import ( "fmt" "strings" + + "github.com/ligato/vpp-agent/plugins/vpp/model" ) const ( - // InterfacePrefix vpp/config/v1/interface/ - InterfacePrefix = "vpp/config/v1/interface/" - // IfStatePrefix vpp/status/v1/interface/ - IfStatePrefix = "vpp/status/v1/interface/" - // IfErrorPrefix vpp/status/v1/interface/error - IfErrorPrefix = "vpp/status/v1/interface/error/" + // interfacePrefix is interface prefix + interfacePrefix = "vpp/config" + model.ProtoApiVersion + "interface/" + // ifStatePrefix is interface state prefix + ifStatePrefix = "vpp/status" + model.ProtoApiVersion + "interface/" + // ifErrorPrefix is interface error prefix + ifErrorPrefix = "vpp/status" + model.ProtoApiVersion + "interface/error/" + // restInterface is rest interface path + restInterface = model.ProtoApiVersion + "interface" + // restLoopback is path for loopback interface + restLoopback = model.ProtoApiVersion + "interface/loopback" + // restEthernet is path for physical interface + restEthernet = model.ProtoApiVersion + "interface/ethernet" + // restMemif is path for memif interface + restMemif = model.ProtoApiVersion + "interface/memif" + // restTap is path for tap interface + restTap = model.ProtoApiVersion + "interface/tap" + // restAfPacket is path for af-packet interface + restAfPacket = model.ProtoApiVersion + "interface/afpacket" + // restVxLan is path for vxlan interface + restVxLan = model.ProtoApiVersion + "interface/vxlan" ) // InterfaceKeyPrefix returns the prefix used in ETCD to store vpp interfaces config. func InterfaceKeyPrefix() string { - return InterfacePrefix + return interfacePrefix } // ParseNameFromKey returns suffix of the key. @@ -46,26 +62,61 @@ func ParseNameFromKey(key string) (name string, err error) { // InterfaceKey returns the prefix used in ETCD to store the vpp interface config // of a particular interface in selected vpp instance. func InterfaceKey(ifaceLabel string) string { - return InterfacePrefix + ifaceLabel + return interfacePrefix + ifaceLabel } // InterfaceErrorPrefix returns the prefix used in ETCD to store the interface errors. func InterfaceErrorPrefix() string { - return IfErrorPrefix + return ifErrorPrefix } // InterfaceErrorKey returns the key used in ETCD to store the interface errors. func InterfaceErrorKey(ifaceLabel string) string { - return IfErrorPrefix + ifaceLabel + return ifErrorPrefix + ifaceLabel } // InterfaceStateKeyPrefix returns the prefix used in ETCD to store the vpp interfaces state data. func InterfaceStateKeyPrefix() string { - return IfStatePrefix + return ifStatePrefix } // InterfaceStateKey returns the prefix used in ETCD to store the vpp interface state data // of particular interface in selected vpp instance. 
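The interface model composes its keys the same way as the ACL model; the helpers here and just below evaluate to, for example:

    fmt.Println(interfaces.InterfaceKeyPrefix())      // vpp/config/v1/interface/
    fmt.Println(interfaces.InterfaceStateKey("tap0")) // vpp/status/v1/interface/tap0
    fmt.Println(interfaces.RestLoopbackKey())         // /v1/interface/loopback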
func InterfaceStateKey(ifaceLabel string) string { - return IfStatePrefix + ifaceLabel + return ifStatePrefix + ifaceLabel +} + +// RestInterfaceKey returns prefix used in REST to dump interface config +func RestInterfaceKey() string { + return restInterface +} + +// RestLoopbackKey returns prefix used in REST to dump loopback interface config +func RestLoopbackKey() string { + return restLoopback +} + +// RestEthernetKey returns prefix used in REST to dump ethernet interface config +func RestEthernetKey() string { + return restEthernet +} + +// RestMemifKey returns prefix used in REST to dump memif interface config +func RestMemifKey() string { + return restMemif +} + +// RestTapKey returns prefix used in REST to dump tap interface config +func RestTapKey() string { + return restTap +} + +// RestAfPAcketKey returns prefix used in REST to dump af-packet interface config +func RestAfPAcketKey() string { + return restAfPacket +} + +// RestVxLanKey returns prefix used in REST to dump VxLAN interface config +func RestVxLanKey() string { + return restVxLan } diff --git a/plugins/vpp/watch_events.go b/plugins/vpp/watch_events.go index 01e0c7447b..27beac4d69 100644 --- a/plugins/vpp/watch_events.go +++ b/plugins/vpp/watch_events.go @@ -98,7 +98,7 @@ func (plugin *Plugin) onStatusResyncEvent(e datasync.ResyncEvent) { var wasError error for key, vals := range e.GetValues() { plugin.Log.Debugf("trying to delete obsolete status for key %v begin ", key) - if strings.HasPrefix(key, interfaces.IfStatePrefix) { + if strings.HasPrefix(key, interfaces.InterfaceStateKeyPrefix()) { var keys []string for { x, stop := vals.GetNext() From f4a0ae08e895863b61892dfa786f4a6f4ef0af3c Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 25 Jul 2018 12:37:21 +0200 Subject: [PATCH 045/174] simple handler registration Signed-off-by: Vladimir Lavor --- cmd/agentctl/cmd/clean_cmd.go | 8 +-- cmd/agentctl/utils/common_utils_test.go | 14 ++--- cmd/agentctl/utils/db_utils.go | 16 +++--- flavors/vpp/vpp_flavor.go | 1 + plugins/vpp/model/l2/keys_agent_l2.go | 70 ++++++++++++++++++------- plugins/vpp/watch_events.go | 2 +- 6 files changed, 71 insertions(+), 40 deletions(-) diff --git a/cmd/agentctl/cmd/clean_cmd.go b/cmd/agentctl/cmd/clean_cmd.go index 1064b891f1..da57b01a2f 100644 --- a/cmd/agentctl/cmd/clean_cmd.go +++ b/cmd/agentctl/cmd/clean_cmd.go @@ -55,9 +55,9 @@ If no data type filter is specified, all data for the specified vpp(s) will be deleted. 
If no [agent-label-filter] argument is specified, data for all agents will be deleted.`, dataTypeFlagName, dataTypeFlagName, - status.StatusPrefix, interfaces.InterfacePrefix, - interfaces.IfStatePrefix, l2.BdPrefix, - l2.XconnectPrefix, l3.RoutesPrefix), + status.StatusPrefix, interfaces.InterfaceKeyPrefix(), + interfaces.InterfaceStateKeyPrefix(), l2.BridgeDomainKeyPrefix(), + l2.XConnectKeyPrefix(), l3.RoutesPrefix), Example: fmt.Sprintf(` Delete all data for "vpp1": $ agentctl clean vpp1 Delete status data for "vpp1"": @@ -66,7 +66,7 @@ for all agents will be deleted.`, $ agentctl clean vpp1 -dataType %s,%s Delete all data for all agents (no filter): $ agentctl clean`, - status.StatusPrefix, status.StatusPrefix, interfaces.InterfacePrefix), + status.StatusPrefix, status.StatusPrefix, interfaces.InterfaceKeyPrefix()), Run: cleanFunc, } diff --git a/cmd/agentctl/utils/common_utils_test.go b/cmd/agentctl/utils/common_utils_test.go index 2a5e51d56d..a670e68f54 100644 --- a/cmd/agentctl/utils/common_utils_test.go +++ b/cmd/agentctl/utils/common_utils_test.go @@ -46,7 +46,7 @@ func Test02ParseKeyInterfaceConfig(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/config/v1/interface/{interface-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.InterfacePrefix)) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.InterfaceKeyPrefix())) gomega.Expect(params).To(gomega.BeEquivalentTo("{interface-name}")) } @@ -58,7 +58,7 @@ func Test03ParseKeyInterfaceStatus(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/interface/{interface-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.IfStatePrefix)) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.InterfaceStateKeyPrefix())) gomega.Expect(params).To(gomega.BeEquivalentTo("{interface-name}")) } @@ -70,7 +70,7 @@ func Test04ParseKeyInterfaceError(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/interface/error/{interface-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.IfErrorPrefix)) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.InterfaceErrorPrefix())) gomega.Expect(params).To(gomega.BeEquivalentTo("{interface-name}")) } @@ -82,7 +82,7 @@ func Test05ParseKeyBdConfig(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/config/v1/bd/{bd-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BdPrefix)) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BridgeDomainKeyPrefix())) gomega.Expect(params).To(gomega.BeEquivalentTo("{bd-name}")) } @@ -94,7 +94,7 @@ func Test06ParseKeyBdState(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/bd/{bd-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BdStatePrefix)) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BridgeDomainStateKeyPrefix())) gomega.Expect(params).To(gomega.BeEquivalentTo("{bd-name}")) } @@ -106,7 +106,7 @@ func Test07ParseKeyBdError(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/bd/error/{bd-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BdErrPrefix)) + 
gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BridgeDomainErrorPrefix())) gomega.Expect(params).To(gomega.BeEquivalentTo("{bd-name}")) } @@ -118,7 +118,7 @@ func Test08ParseKeyFib(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/config/v1/bd/{bd-label}/fib/{mac-address}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.FIBPrefix)) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.FibKeyPrefix())) gomega.Expect(params).To(gomega.BeEquivalentTo("{mac-address}")) } diff --git a/cmd/agentctl/utils/db_utils.go b/cmd/agentctl/utils/db_utils.go index d7c910b2f8..544c14f0f1 100644 --- a/cmd/agentctl/utils/db_utils.go +++ b/cmd/agentctl/utils/db_utils.go @@ -190,21 +190,21 @@ func (ed EtcdDump) ReadDataFromDb(db keyval.ProtoBroker, key string, vd = newVppDataRecord() } switch dataType { - case interfaces.InterfacePrefix: + case interfaces.InterfaceKeyPrefix(): ed[label], err = readIfConfigFromDb(db, vd, key, params) - case interfaces.IfStatePrefix: + case interfaces.InterfaceStateKeyPrefix(): ed[label], err = readIfStateFromDb(db, vd, key, params) - case interfaces.IfErrorPrefix: + case interfaces.InterfaceErrorPrefix(): ed[label], err = readInterfaceErrorFromDb(db, vd, key, params) - case l2.BdPrefix: + case l2.BridgeDomainKeyPrefix(): ed[label], err = readBdConfigFromDb(db, vd, key, params) - case l2.BdStatePrefix: + case l2.BridgeDomainStateKeyPrefix(): ed[label], err = readBdStateFromDb(db, vd, key, params) - case l2.BdErrPrefix: + case l2.BridgeDomainErrorPrefix(): ed[label], err = readBdErrorFromDb(db, vd, key, params) - case l2.FIBPrefix: + case l2.FibKeyPrefix(): ed[label], err = readFibFromDb(db, vd, key) - case l2.XconnectPrefix: + case l2.XConnectKeyPrefix(): ed[label], err = readXconnectFromDb(db, vd, key, params) case l3.RoutesPrefix: ed[label], err = readRoutesFromDb(db, vd, key) diff --git a/flavors/vpp/vpp_flavor.go b/flavors/vpp/vpp_flavor.go index b43404ed2c..6a93d2fa12 100644 --- a/flavors/vpp/vpp_flavor.go +++ b/flavors/vpp/vpp_flavor.go @@ -141,6 +141,7 @@ func (f *Flavor) Inject() bool { f.RESTAPIPlugin.Deps.PluginInfraDeps = *f.FlavorLocal.InfraDeps("rest") f.RESTAPIPlugin.Deps.HTTPHandlers = &f.FlavorRPC.HTTP f.RESTAPIPlugin.Deps.GoVppmux = &f.GoVPP + f.RESTAPIPlugin.Deps.VPP = &f.VPP f.TelemetryPlugin.Deps.PluginInfraDeps = *f.FlavorLocal.InfraDeps("telemetry") f.TelemetryPlugin.Deps.Prometheus = &f.FlavorRPC.Prometheus diff --git a/plugins/vpp/model/l2/keys_agent_l2.go b/plugins/vpp/model/l2/keys_agent_l2.go index 80d0aa7ebc..2addfd7dfa 100644 --- a/plugins/vpp/model/l2/keys_agent_l2.go +++ b/plugins/vpp/model/l2/keys_agent_l2.go @@ -17,52 +17,62 @@ package l2 import ( "fmt" "strings" + + "github.com/ligato/vpp-agent/plugins/vpp/model" ) // Prefixes const ( - // BdPrefix is the relative key prefix for bridge domains. - BdPrefix = "vpp/config/v1/bd/" - // BdStatePrefix is the relative key prefix for bridge domain state. - BdStatePrefix = "vpp/status/v1/bd/" - // BdErrPrefix is the relative key prefix for the bridge domain error. - BdErrPrefix = "vpp/status/v1/bd/error/" - // FIBPrefix is the relative key prefix for FIB table entries. - FIBPrefix = "vpp/config/v1/bd/{bd}/fib/" - // XconnectPrefix is the relative key prefix for xconnects. - XconnectPrefix = "vpp/config/v1/xconnect/" + // bdPrefix is the relative key prefix for bridge domains. + bdPrefix = "vpp/config" + model.ProtoApiVersion + "bd/" + // bdStatePrefix is the relative key prefix for bridge domain state. 
+ bdStatePrefix = "vpp/status" + model.ProtoApiVersion + "bd/" + // bdErrPrefix is the relative key prefix for the bridge domain error. + bdErrPrefix = "vpp/status" + model.ProtoApiVersion + "bd/error/" + // restBd is rest bridge domain path + restBd = model.ProtoApiVersion + "bd" + // restBdId is rest bridge domain ID path + restBdId = model.ProtoApiVersion + "bdid" + // fibPrefix is the relative key prefix for FIB table entries. + fibPrefix = "vpp/config/v1/bd/{bd}/fib/" + // restFib is rest FIB path + restFib = model.ProtoApiVersion + "fib" + // xConnectPrefix is the relative key prefix for xconnects. + xConnectPrefix = "vpp/config/v1/xconnect/" + // restXc is rest cross-connect path + restXc = model.ProtoApiVersion + "xc" ) // BridgeDomainKeyPrefix returns the prefix used in ETCD to store vpp bridge domain config. func BridgeDomainKeyPrefix() string { - return BdPrefix + return bdPrefix } // BridgeDomainKey returns the prefix used in ETCD to store vpp bridge domain config // of a particular bridge domain in selected vpp instance. func BridgeDomainKey(bdName string) string { - return BdPrefix + bdName + return bdPrefix + bdName } // BridgeDomainStateKeyPrefix returns the prefix used in ETCD to store vpp bridge domain state data. func BridgeDomainStateKeyPrefix() string { - return BdStatePrefix + return bdStatePrefix } // BridgeDomainStateKey returns the prefix used in ETCD to store vpp bridge domain state data // of a particular bridge domain in selected vpp instance. func BridgeDomainStateKey(ifaceLabel string) string { - return BdStatePrefix + ifaceLabel + return bdStatePrefix + ifaceLabel } // BridgeDomainErrorPrefix returns the prefix used in ETCD to store bridge domain errors. func BridgeDomainErrorPrefix() string { - return BdErrPrefix + return bdErrPrefix } // BridgeDomainErrorKey returns the key used in ETCD to store bridge domain errors. func BridgeDomainErrorKey(bdLabel string) string { - return BdErrPrefix + bdLabel + return bdErrPrefix + bdLabel } // ParseBDNameFromKey returns suffix of the key. @@ -75,15 +85,25 @@ func ParseBDNameFromKey(key string) (name string, err error) { return key, fmt.Errorf("wrong format of the key %s", key) } +// RestBridgeDomainKey returns the key used in REST to dump bridge domains. +func RestBridgeDomainKey() string { + return restBd +} + +// RestBridgeDomainIDKey returns the key used in REST to dump bridge domain IDs. +func RestBridgeDomainIDKey() string { + return restBdId +} + // FibKeyPrefix returns the prefix used in ETCD to store vpp fib table entry config. func FibKeyPrefix() string { - return FIBPrefix + return fibPrefix } // FibKey returns the prefix used in ETCD to store vpp fib table entry config // of a particular fib in selected vpp instance. func FibKey(bdLabel string, fibMac string) string { - return strings.Replace(FIBPrefix, "{bd}", bdLabel, 1) + fibMac + return strings.Replace(fibPrefix, "{bd}", bdLabel, 1) + fibMac } // ParseFibKey parses bridge domain label and FIB MAC address from a FIB key. @@ -98,13 +118,23 @@ func ParseFibKey(key string) (isFibKey bool, bdName string, fibMac string) { return false, "", "" } +// RestFibKey returns the prefix used in REST to dump vpp fib table entry config. +func RestFibKey() string { + return restFib +} + // XConnectKeyPrefix returns the prefix used in ETCD to store vpp xConnect pair config. 
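FibKey is the one composite key in this file: the {bd} placeholder in fibPrefix is substituted per bridge domain before the MAC address is appended. Concretely (values follow from the constants above):

    fmt.Println(l2.BridgeDomainKey("bd1"))             // vpp/config/v1/bd/bd1
    fmt.Println(l2.FibKey("bd1", "62:89:c6:8d:42:e8")) // vpp/config/v1/bd/bd1/fib/62:89:c6:8d:42:e8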
func XConnectKeyPrefix() string { - return XconnectPrefix + return xConnectPrefix } // XConnectKey returns the prefix used in ETCD to store vpp xConnect pair config // of particular xConnect pair in selected vpp instance. func XConnectKey(rxIface string) string { - return XconnectPrefix + rxIface + return xConnectPrefix + rxIface +} + +// RestXConnectKey returns the prefix used in REST to dump vpp xConnect pair config. +func RestXConnectKey() string { + return restXc } diff --git a/plugins/vpp/watch_events.go b/plugins/vpp/watch_events.go index 27beac4d69..7c4cb57afc 100644 --- a/plugins/vpp/watch_events.go +++ b/plugins/vpp/watch_events.go @@ -113,7 +113,7 @@ func (plugin *Plugin) onStatusResyncEvent(e datasync.ResyncEvent) { wasError = err } } - } else if strings.HasPrefix(key, l2.BdStatePrefix) { + } else if strings.HasPrefix(key, l2.BridgeDomainStateKeyPrefix()) { var keys []string for { x, stop := vals.GetNext() From 006a24881328de7ffbfb517ef28ada6cceaf7076 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 25 Jul 2018 12:37:46 +0200 Subject: [PATCH 046/174] rest calls for l2plugin Signed-off-by: Vladimir Lavor --- cmd/vpp-agent-ctl/data_cmd.go | 4 +- plugins/rest/plugin_impl_rest.go | 116 +++++-- plugins/rest/rest_handlers.go | 507 ++++++++----------------------- plugins/vpp/error_status.go | 6 +- 4 files changed, 222 insertions(+), 411 deletions(-) diff --git a/cmd/vpp-agent-ctl/data_cmd.go b/cmd/vpp-agent-ctl/data_cmd.go index c4f9d21f7d..4b0957f1a5 100644 --- a/cmd/vpp-agent-ctl/data_cmd.go +++ b/cmd/vpp-agent-ctl/data_cmd.go @@ -1274,7 +1274,7 @@ func (ctl *VppAgentCtl) deleteTxn() { // ReportIfaceErrorState reports interface status data to the ETCD func (ctl *VppAgentCtl) reportIfaceErrorState() { - ifErr, err := ctl.broker.ListValues(interfaces.IfErrorPrefix) + ifErr, err := ctl.broker.ListValues(interfaces.InterfaceErrorPrefix()) if err != nil { ctl.Log.Fatal(err) return @@ -1296,7 +1296,7 @@ func (ctl *VppAgentCtl) reportIfaceErrorState() { // ReportBdErrorState reports bridge domain status data to the ETCD func (ctl *VppAgentCtl) reportBdErrorState() { - bdErr, err := ctl.broker.ListValues(l2.BdErrPrefix) + bdErr, err := ctl.broker.ListValues(l2.BridgeDomainErrorPrefix()) if err != nil { ctl.Log.Fatal(err) return diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index b5d396c854..ca54c18174 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -17,11 +17,19 @@ package rest import ( "fmt" + "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/flavors/local" "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/vpp-agent/plugins/govppmux" + "github.com/ligato/vpp-agent/plugins/vpp" + aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx" + l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" + "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) const ( @@ -30,8 +38,7 @@ const ( // REST api methods const ( - POST = "POST" - GET = "GET" + GET = "GET" ) // Plugin registers Rest Plugin @@ -39,6 +46,21 @@ type Plugin struct { Deps indexItems []indexItem + + // Channels + vppChan api.Channel + dumpChan api.Channel + + // Indexes + ifIndexes ifaceidx.SwIfIndex + bdIndexes 
l2idx.BDIndex + + // Handlers + aclHandler aclvppcalls.AclVppRead + ifHandler ifvppcalls.IfVppRead + bdHandler l2vppcalls.BridgeDomainVppRead + fibHandler l2vppcalls.FibVppRead + xcHandler l2vppcalls.XConnectVppRead } // Deps represents dependencies of Rest Plugin @@ -46,6 +68,7 @@ type Deps struct { local.PluginInfraDeps HTTPHandlers rest.HTTPHandlers GoVppmux govppmux.API + VPP vpp.API } type indexItem struct { @@ -55,14 +78,61 @@ type indexItem struct { // Init initializes the Rest Plugin func (plugin *Plugin) Init() (err error) { + // VPP channels + if plugin.vppChan, err = plugin.GoVppmux.NewAPIChannel(); err != nil { + return err + } + if plugin.dumpChan, err = plugin.GoVppmux.NewAPIChannel(); err != nil { + return err + } + // Indexes + if plugin.VPP != nil { + plugin.ifIndexes = plugin.VPP.GetSwIfIndexes() + plugin.bdIndexes = plugin.VPP.GetBDIndexes() + } + + // Initialize handlers + if plugin.aclHandler, err = aclvppcalls.NewAclVppHandler(plugin.vppChan, plugin.dumpChan, nil); err != nil { + return err + } + if plugin.ifHandler, err = ifvppcalls.NewIfVppHandler(plugin.vppChan, plugin.Log, nil); err != nil { + return err + } + if plugin.ifIndexes != nil { + if plugin.bdHandler, err = l2vppcalls.NewBridgeDomainVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.Log, nil); err != nil { + return err + } + } + if plugin.ifIndexes != nil && plugin.bdIndexes != nil { + if plugin.fibHandler, err = l2vppcalls.NewFibVppHandler(plugin.vppChan, plugin.dumpChan, make(chan *l2vppcalls.FibLogicalReq), + plugin.ifIndexes, plugin.bdIndexes, plugin.Log, nil); err != nil { + return err + } + } + if plugin.ifIndexes != nil { + if plugin.xcHandler, err = l2vppcalls.NewXConnectVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.Log, nil); err != nil { + return err + } + } + plugin.indexItems = []indexItem{ - {Name: "Interfaces", Path: "/interfaces"}, - {Name: "Bridge domains", Path: "/bridgedomains"}, - {Name: "L2Fibs", Path: "/l2fibs"}, - {Name: "XConnectorPairs", Path: "/xconnectpairs"}, + {Name: "ACL IP", Path: acl.RestIPKey()}, + {Name: "ACL MACIP", Path: acl.RestMACIPKey()}, + {Name: "Interfaces", Path: interfaces.RestInterfaceKey()}, + {Name: "Loopback interfaces", Path: interfaces.RestLoopbackKey()}, + {Name: "Ethernet interfaces", Path: interfaces.RestEthernetKey()}, + {Name: "Memif interfaces", Path: interfaces.RestMemifKey()}, + {Name: "Tap interfaces", Path: interfaces.RestTapKey()}, + {Name: "VxLAN interfaces", Path: interfaces.RestVxLanKey()}, + {Name: "Af-packet interfaces", Path: interfaces.RestAfPAcketKey()}, + {Name: "Bridge domains", Path: l2.RestBridgeDomainKey()}, + {Name: "Bridge domain IDs", Path: l2.RestBridgeDomainIDKey()}, + {Name: "L2Fibs", Path: l2.RestFibKey()}, + {Name: "XConnectorPairs", Path: l2.RestXConnectKey()}, + {Name: "ARPs", Path: "/arps"}, {Name: "Static routes", Path: "/staticroutes"}, - {Name: "ACL IP", Path: "/acl/ip"}, + {Name: "Telemetry", Path: "/telemetry"}, } return nil @@ -72,26 +142,18 @@ func (plugin *Plugin) Init() (err error) { func (plugin *Plugin) AfterInit() (err error) { plugin.Log.Debug("REST API Plugin is up and running") - // Access lists - plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPKey(), plugin.ipACLPostHandler, POST) - plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPKey(), plugin.ipACLGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPExampleKey(), plugin.exampleIpACLGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPKey(), plugin.macipACLPostHandler, POST) - 
plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPKey(), plugin.macipACLGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPExampleKey(), plugin.exampleMacIpACLGetHandler, GET) - // Interfaces - plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestInterfaceKey(), plugin.interfacesGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestLoopbackKey(), plugin.loopbackGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestEthernetKey(), plugin.ethernetGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestMemifKey(), plugin.memifGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestTapKey(), plugin.tapGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestAfPAcketKey(), plugin.afpacketGetHandler, GET) - plugin.HTTPHandlers.RegisterHTTPHandler(interfaces.RestVxLanKey(), plugin.vxLanTypeGetHandler, GET) - - plugin.HTTPHandlers.RegisterHTTPHandler("/bridgedomains", plugin.bridgeDomainsGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/bridgedomainids", plugin.bridgeDomainIdsGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/l2fibs", plugin.fibTableEntriesGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/xconnectpairs", plugin.xconnectPairsGetHandler, "GET") + if err := plugin.registerAccessListHandlers(); err != nil { + return err + } + if err := plugin.registerInterfaceHandlers(); err != nil { + return err + } + if plugin.bdHandler != nil { + if err := plugin.registerL2Handlers(); err != nil { + return err + } + } + plugin.HTTPHandlers.RegisterHTTPHandler("/arps", plugin.arpGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/staticroutes", plugin.staticRoutesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler(fmt.Sprintf("/acl/interface/{%s:[0-9]+}", swIndexVarName), diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 36fd13efe6..b1b769d2b1 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -30,409 +30,168 @@ import ( "github.com/unrolled/render" aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" - ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" + "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) -// interfacesGetHandler - used to get list of all interfaces -func (plugin *Plugin) interfacesGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all interfaces") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := ifHandler.DumpInterfaces() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) +// Registers access list REST handlers +func (plugin 
*Plugin) registerAccessListHandlers() error { + // GET IP ACLs + if err := plugin.registerHTTPHandler(acl.RestIPKey(), GET, func() (interface{}, error) { + return plugin.aclHandler.DumpIPACL(nil) + }); err != nil { + return err } -} - -// interfacesGetHandler - used to get list of all interfaces -func (plugin *Plugin) loopbackGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all interfaces") + // GET MACIP ACLs + if err := plugin.registerHTTPHandler(acl.RestMACIPKey(), GET, func() (interface{}, error) { + return plugin.aclHandler.DumpMacIPAcls() + }); err != nil { + return err + } + // GET IP ACL example + plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPExampleKey(), plugin.exampleIpACLGetHandler, GET) + // GET MACIP ACL example + plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPExampleKey(), plugin.exampleMacIpACLGetHandler, GET) - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() + return nil +} - ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := ifHandler.DumpInterfaces() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - // Keep only loopback type interfaces - for ifKey, ifConfig := range res { - if ifConfig.Type != interfaces.InterfaceType_SOFTWARE_LOOPBACK { - delete(res, ifKey) +// Registers interface REST handlers +func (plugin *Plugin) registerInterfaceHandlers() error { + // GET all interfaces + if err := plugin.registerHTTPHandler(interfaces.RestInterfaceKey(), GET, func() (interface{}, error) { + return plugin.ifHandler.DumpInterfaces() + }); err != nil { + return err + } + // GET loopback interfaces + if err := plugin.registerHTTPHandler(interfaces.RestLoopbackKey(), GET, func() (interface{}, error) { + ifs, err := plugin.ifHandler.DumpInterfaces() + for ifKey, ifConfig := range ifs { + if ifConfig.Interface.Type != interfaces.InterfaceType_SOFTWARE_LOOPBACK { + delete(ifs, ifKey) } } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) + return ifs, err + }); err != nil { + return err } -} - -// interfacesGetHandler - used to get list of all interfaces -func (plugin *Plugin) ethernetGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all interfaces") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := ifHandler.DumpInterfaces() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - // Keep only loopback type interfaces - for ifKey, ifConfig := range res { - if ifConfig.Type != interfaces.InterfaceType_ETHERNET_CSMACD { - delete(res, ifKey) + // GET ethernet 
interfaces + if err := plugin.registerHTTPHandler(interfaces.RestEthernetKey(), GET, func() (interface{}, error) { + ifs, err := plugin.ifHandler.DumpInterfaces() + for ifKey, ifConfig := range ifs { + if ifConfig.Interface.Type != interfaces.InterfaceType_ETHERNET_CSMACD { + delete(ifs, ifKey) } } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) + return ifs, err + }); err != nil { + return err } -} - -// interfacesGetHandler - used to get list of all interfaces -func (plugin *Plugin) memifGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all interfaces") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := ifHandler.DumpInterfaces() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - // Keep only loopback type interfaces - for ifKey, ifConfig := range res { - if ifConfig.Type != interfaces.InterfaceType_MEMORY_INTERFACE { - delete(res, ifKey) + // GET memif interfaces + if err := plugin.registerHTTPHandler(interfaces.RestMemifKey(), GET, func() (interface{}, error) { + ifs, err := plugin.ifHandler.DumpInterfaces() + for ifKey, ifConfig := range ifs { + if ifConfig.Interface.Type != interfaces.InterfaceType_MEMORY_INTERFACE { + delete(ifs, ifKey) } } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) + return ifs, err + }); err != nil { + return err } -} - -// interfacesGetHandler - used to get list of all interfaces -func (plugin *Plugin) tapGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all interfaces") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := ifHandler.DumpInterfaces() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - // Keep only loopback type interfaces - for ifKey, ifConfig := range res { - if ifConfig.Type != interfaces.InterfaceType_TAP_INTERFACE { - delete(res, ifKey) + // GET tap interfaces + if err := plugin.registerHTTPHandler(interfaces.RestTapKey(), GET, func() (interface{}, error) { + ifs, err := plugin.ifHandler.DumpInterfaces() + for ifKey, ifConfig := range ifs { + if ifConfig.Interface.Type != interfaces.InterfaceType_TAP_INTERFACE { + delete(ifs, ifKey) } } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) + return ifs, err + }); err != nil { + return err } -} - -// interfacesGetHandler - used to get list of all interfaces -func (plugin *Plugin) afpacketGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req 
*http.Request) { - - plugin.Log.Debug("Getting list of all interfaces") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := ifHandler.DumpInterfaces() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - // Keep only loopback type interfaces - for ifKey, ifConfig := range res { - if ifConfig.Type != interfaces.InterfaceType_AF_PACKET_INTERFACE { - delete(res, ifKey) + // GET af-packet interfaces + if err := plugin.registerHTTPHandler(interfaces.RestAfPAcketKey(), GET, func() (interface{}, error) { + ifs, err := plugin.ifHandler.DumpInterfaces() + for ifKey, ifConfig := range ifs { + if ifConfig.Interface.Type != interfaces.InterfaceType_AF_PACKET_INTERFACE { + delete(ifs, ifKey) } } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) + return ifs, err + }); err != nil { + return err } -} - -// interfacesGetHandler - used to get list of all interfaces -func (plugin *Plugin) vxLanTypeGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all interfaces") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - ifHandler, err := ifplugin.NewIfVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := ifHandler.DumpInterfaces() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - // Keep only loopback type interfaces - for ifKey, ifConfig := range res { - if ifConfig.Type != interfaces.InterfaceType_VXLAN_TUNNEL { - delete(res, ifKey) + // GET VxLAN interfaces + if err := plugin.registerHTTPHandler(interfaces.RestVxLanKey(), GET, func() (interface{}, error) { + ifs, err := plugin.ifHandler.DumpInterfaces() + for ifKey, ifConfig := range ifs { + if ifConfig.Interface.Type != interfaces.InterfaceType_VXLAN_TUNNEL { + delete(ifs, ifKey) } } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) + return ifs, err + }); err != nil { + return err } -} - -// bridgeDomainIdsGetHandler - used to get list of all bridge domain ids -func (plugin *Plugin) bridgeDomainIdsGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - plugin.Log.Debug("Getting list of all bridge domain ids") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - bdHandler, err := l2plugin.NewBridgeDomainVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := 
bdHandler.DumpBridgeDomainIDs() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) - } + return nil } -// bridgeDomainsGetHandler - used to get list of all bridge domains -func (plugin *Plugin) bridgeDomainsGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all bridge domains") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - bdHandler, err := l2plugin.NewBridgeDomainVppHandler(ch, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := bdHandler.DumpBridgeDomains() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, nil) - return - } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) +// Registers L2 plugin REST handlers +func (plugin *Plugin) registerL2Handlers() error { + // GET bridge domain IDs + if err := plugin.registerHTTPHandler(l2.RestBridgeDomainIDKey(), GET, func() (interface{}, error) { + return plugin.bdHandler.DumpBridgeDomainIDs() + }); err != nil { + return err } -} - -// fibTableEntriesGetHandler - used to get list of all fib entries -func (plugin *Plugin) fibTableEntriesGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all fibs") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - fibHandler, err := l2plugin.NewFibVppHandler(ch, nil, make(chan *l2plugin.FibLogicalReq), plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := fibHandler.DumpFIBTableEntries() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, nil) - return - } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) + // GET bridge domains + if err := plugin.registerHTTPHandler(l2.RestBridgeDomainKey(), GET, func() (interface{}, error) { + return plugin.bdHandler.DumpBridgeDomains() + }); err != nil { + return err + } + // GET FIB entries + if err := plugin.registerHTTPHandler(l2.RestFibKey(), GET, func() (interface{}, error) { + return plugin.fibHandler.DumpFIBTableEntries() + }); err != nil { + return err + } + // GET cross connects + if err := plugin.registerHTTPHandler(l2.RestXConnectKey(), GET, func() (interface{}, error) { + return plugin.xcHandler.DumpXConnectPairs() + }); err != nil { + return err } -} - -// xconnectPairsGetHandler - used to get list of all connect pairs (transmit and receive interfaces) -func (plugin *Plugin) xconnectPairsGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - plugin.Log.Debug("Getting list of all xconnect pairs") + return nil +} - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - 
plugin.Log.Errorf("Error creating channel: %v", err)
-			formatter.JSON(w, http.StatusInternalServerError, err)
-			return
-		}
-		defer ch.Close()
+// registerHTTPHandler is a common register method for all handlers
+func (plugin *Plugin) registerHTTPHandler(key, method string, f func() (interface{}, error)) error {
+	handlerFunc := func(formatter *render.Render) http.HandlerFunc {
+		return func(w http.ResponseWriter, req *http.Request) {
+			res, err := f()
+			if err != nil {
+				plugin.Deps.Log.Errorf("Error: %v", err)
+				formatter.JSON(w, http.StatusInternalServerError, err)
+				return
+			}
 
-		xcHandler, err := l2plugin.NewXConnectVppHandler(ch, plugin.Log, nil)
-		if err != nil {
-			plugin.Log.Errorf("Error creating VPP handler: %v", err)
-			formatter.JSON(w, http.StatusInternalServerError, err)
-			return
-		}
-		res, err := xcHandler.DumpXConnectPairs()
-		if err != nil {
-			plugin.Log.Errorf("Error: %v", err)
-			formatter.JSON(w, http.StatusInternalServerError, nil)
+			plugin.Deps.Log.Debug(res)
+			formatter.JSON(w, http.StatusOK, res)
 		}
-
-		plugin.Log.Debug(res)
-		formatter.JSON(w, http.StatusOK, res)
 	}
+	plugin.HTTPHandlers.RegisterHTTPHandler(key, handlerFunc, method)
+	return nil
 }
 
 // staticRoutesGetHandler - used to get list of all static routes
@@ -523,29 +282,14 @@ func (plugin *Plugin) interfaceACLGetHandler(formatter *render.Render) http.Hand
 		return
 	}
 
-	// create an API channel
-	ch, err := plugin.GoVppmux.NewAPIChannel()
-	if err != nil {
-		plugin.Log.Errorf("Error creating channel: %v", err)
-		formatter.JSON(w, http.StatusInternalServerError, err)
-		return
-	}
-	defer ch.Close()
-
 	swIndex := uint32(swIndexuInt64)
-	aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil)
-	if err != nil {
-		plugin.Log.Errorf("Error creating VPP handler: %v", err)
-		formatter.JSON(w, http.StatusInternalServerError, err)
-		return
-	}
-	res, err := aclHandler.DumpInterfaceIPAcls(swIndex)
+	res, err := plugin.aclHandler.DumpInterfaceIPAcls(swIndex)
 	if err != nil {
 		plugin.Deps.Log.Errorf("Error: %v", err)
 		formatter.JSON(w, http.StatusInternalServerError, err)
 		return
 	}
-	res, err = aclHandler.DumpInterfaceMACIPAcls(swIndex)
+	res, err = plugin.aclHandler.DumpInterfaceMACIPAcls(swIndex)
 	if err != nil {
 		plugin.Log.Errorf("Error: %v", err)
 		formatter.JSON(w, http.StatusInternalServerError, err)
diff --git a/plugins/vpp/error_status.go b/plugins/vpp/error_status.go
index 48768a96a6..bca4792348 100644
--- a/plugins/vpp/error_status.go
+++ b/plugins/vpp/error_status.go
@@ -254,7 +254,7 @@ func (plugin *Plugin) removeErrorLog(key string) {
 		key := interfaces.InterfaceErrorKey(name)
 		plugin.Publish.Put(key, nil)
 		plugin.Log.Infof("Error status log for interface %v cleared", name)
-	} else if prefix == l2.BdPrefix {
+	} else if prefix == l2.BridgeDomainKeyPrefix() {
 		key := l2.BridgeDomainErrorKey(name)
 		plugin.Publish.Put(key, nil)
 		plugin.Log.Infof("Error status log for bridge domain %v cleared", name)
@@ -272,8 +272,8 @@ func (plugin *Plugin) removeOldestErrorLogEntry(key string) {
 	if strings.HasPrefix(key, interfaces.InterfaceErrorPrefix()) {
 		name = strings.Replace(key, interfaces.InterfaceErrorPrefix(), "", 1)
 		_, metaData, exists = plugin.errorIndexes.LookupIdx(name)
-	} else if strings.HasPrefix(key, l2.BdErrPrefix) {
-		name = strings.Replace(key, l2.BdErrPrefix, "", 1)
+	} else if strings.HasPrefix(key, l2.BridgeDomainErrorPrefix()) {
+		name = strings.Replace(key, l2.BridgeDomainErrorPrefix(), "", 1)
 		_, metaData, exists = plugin.errorIndexes.LookupIdx(name)
 	}
 	if !exists {
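
A note on the registerHTTPHandler helper above: each read-only endpoint now reduces to a key, a method and a closure returning (data, error). A minimal sketch of wiring one more endpoint through it; the path string is an illustrative assumption, not part of this patch:

    // Sketch only: "/example/interfaces" is an assumed key, not a real model path.
    func (plugin *Plugin) registerExampleHandler() error {
    	return plugin.registerHTTPHandler("/example/interfaces", GET, func() (interface{}, error) {
    		// Any dump method with the func() (interface{}, error) shape fits here.
    		return plugin.ifHandler.DumpInterfaces()
    	})
    }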
From d5f5773875963dddab952e89eecb9187c4dcf8f0 Mon Sep 17 00:00:00 2001
From: Vladimir Lavor
Date: Wed, 25 Jul 2018 12:38:25 +0200
Subject: [PATCH 047/174] improved dump for interfaces and bridge domains

Signed-off-by: Vladimir Lavor
---
 plugins/vpp/ifplugin/data_resync.go           | 24 +++---
 plugins/vpp/ifplugin/interface_config.go      |  4 +-
 plugins/vpp/ifplugin/vppcalls/api_vppcalls.go |  2 +-
 .../vppcalls/dump_interface_vppcalls.go       | 79 ++++++++++---------
 .../vppcalls/dump_interface_vppcalls_test.go  |  8 +-
 plugins/vpp/l2plugin/bd_config.go             |  2 +-
 .../vppcalls/bridge_domain_vppcalls_test.go   | 17 ++--
 7 files changed, 73 insertions(+), 63 deletions(-)

diff --git a/plugins/vpp/ifplugin/data_resync.go b/plugins/vpp/ifplugin/data_resync.go
index 34394ab932..826634b947 100644
--- a/plugins/vpp/ifplugin/data_resync.go
+++ b/plugins/vpp/ifplugin/data_resync.go
@@ -67,46 +67,46 @@ func (plugin *InterfaceConfigurator) Resync(nbIfs []*intf.Interfaces_Interface)
 	for vppIfIdx, vppIf := range vppIfs {
 		if vppIfIdx == 0 {
 			// Register local0 interface with zero index
-			if err := plugin.registerInterface(vppIf.VPPInternalName, vppIfIdx, &vppIf.Interfaces_Interface); err != nil {
+			if err := plugin.registerInterface(vppIf.Meta.InternalName, vppIfIdx, vppIf.Interface); err != nil {
 				errs = append(errs, err)
 			}
 			continue
 		}
-		if vppIf.Name == "" {
+		if vppIf.Interface.Name == "" {
 			// If interface has no name, it is stored as unnamed and resolved later
 			plugin.log.Debugf("RESYNC interfaces: interface %v has no name (tag)", vppIfIdx)
-			unnamedVppIfs[vppIfIdx] = &vppIf.Interfaces_Interface
+			unnamedVppIfs[vppIfIdx] = vppIf.Interface
 			continue
 		}
 		var correlated bool
 		for _, nbIf := range nbIfs {
-			if vppIf.Name == nbIf.Name {
+			if vppIf.Interface.Name == nbIf.Name {
 				correlated = true
 				// Register interface to mapping and VPP tag/index
-				if err := plugin.registerInterface(vppIf.Name, vppIfIdx, nbIf); err != nil {
+				if err := plugin.registerInterface(vppIf.Interface.Name, vppIfIdx, nbIf); err != nil {
 					errs = append(errs, err)
 				}
 				// Calculate whether modification is needed
-				if plugin.isIfModified(nbIf, &vppIf.Interfaces_Interface) {
-					plugin.log.Debugf("RESYNC interfaces: modifying interface %v", vppIf.Name)
-					if err = plugin.ModifyVPPInterface(nbIf, &vppIf.Interfaces_Interface); err != nil {
+				if plugin.isIfModified(nbIf, vppIf.Interface) {
+					plugin.log.Debugf("RESYNC interfaces: modifying interface %v", vppIf.Interface.Name)
+					if err = plugin.ModifyVPPInterface(nbIf, vppIf.Interface); err != nil {
 						plugin.log.Errorf("Error while modifying interface: %v", err)
 						errs = append(errs, err)
 					}
 				} else {
-					plugin.log.Debugf("RESYNC interfaces: %v registered without additional changes", vppIf.Name)
+					plugin.log.Debugf("RESYNC interfaces: %v registered without additional changes", vppIf.Interface.Name)
 				}
 				break
 			}
 		}
 		if !correlated {
 			// Register interface before removal (to keep state consistent)
-			if err := plugin.registerInterface(vppIf.Name, vppIfIdx, &vppIf.Interfaces_Interface); err != nil {
+			if err := plugin.registerInterface(vppIf.Interface.Name, vppIfIdx, vppIf.Interface); err != nil {
 				errs = append(errs, err)
 			}
 			// VPP interface is obsolete and will be removed (un-configured if physical device)
-			plugin.log.Debugf("RESYNC interfaces: removing obsolete interface %v", vppIf.Name)
-			if err = plugin.deleteVPPInterface(&vppIf.Interfaces_Interface, vppIfIdx); err != nil {
+			plugin.log.Debugf("RESYNC interfaces: removing obsolete interface %v", vppIf.Interface.Name)
+			if err = plugin.deleteVPPInterface(vppIf.Interface, vppIfIdx); err != nil {
plugin.log.Errorf("Error while removing interface: %v", err) errs = append(errs, err) } diff --git a/plugins/vpp/ifplugin/interface_config.go b/plugins/vpp/ifplugin/interface_config.go index 583945d22e..bf5d757e11 100644 --- a/plugins/vpp/ifplugin/interface_config.go +++ b/plugins/vpp/ifplugin/interface_config.go @@ -277,7 +277,7 @@ func (plugin *InterfaceConfigurator) ConfigureVPPInterface(iface *intf.Interface if !ok || ifData == nil { return fmt.Errorf("set rx-placement failed, no data available for interface index %d", ifIdx) } - if err := plugin.ifHandler.SetRxPlacement(ifData.VPPInternalName, iface.RxPlacementSettings); err != nil { + if err := plugin.ifHandler.SetRxPlacement(ifData.Meta.InternalName, iface.RxPlacementSettings); err != nil { errs = append(errs, err) } } @@ -603,7 +603,7 @@ func (plugin *InterfaceConfigurator) modifyVPPInterface(newConfig *intf.Interfac if !ok || ifData == nil { return fmt.Errorf("set rx-placement for new config failed, no data available for interface index %d", ifIdx) } - if err := plugin.ifHandler.SetRxPlacement(ifData.VPPInternalName, newConfig.RxPlacementSettings); err != nil { + if err := plugin.ifHandler.SetRxPlacement(ifData.Meta.InternalName, newConfig.RxPlacementSettings); err != nil { wasError = err } } diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go index 98f156b8be..8f13928ee1 100644 --- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -105,7 +105,7 @@ type IfVppRead interface { // LIMITATIONS: // - there is no af_packet dump binary API. We relay on naming conventions of the internal VPP interface names // - ip.IPAddressDetails has wrong internal structure, as a workaround we need to handle them as notifications - DumpInterfaces() (map[uint32]*Interface, error) + DumpInterfaces() (map[uint32]*InterfaceDetails, error) // GetInterfaceVRF assigns VRF table to interface GetInterfaceVRF(ifIdx uint32) (vrfID uint32, err error) // DumpMemifSocketDetails dumps memif socket details from the VPP diff --git a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go index 8a9bb8c515..1f21ca103d 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go @@ -35,16 +35,21 @@ import ( // Default VPP MTU value const defaultVPPMtu = 9216 -// Interface is the wrapper structure for the interface northbound API structure. -type Interface struct { - VPPInternalName string `json:"vpp_internal_name"` - ifnb.Interfaces_Interface +// InterfaceDetails is the wrapper structure for the interface northbound API structure. +type InterfaceDetails struct { + Interface *ifnb.Interfaces_Interface + Meta *InterfaceMeta } -func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*Interface, error) { +// InterfaceMeta is combination of proto-modelled Interface data and VPP provided metadata +type InterfaceMeta struct { + InternalName string `json:"internal_name"` +} + +func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*InterfaceDetails, error) { start := time.Now() // map for the resulting interfaces - ifs := make(map[uint32]*Interface) + ifs := make(map[uint32]*InterfaceDetails) // First, dump all interfaces to create initial data. 
reqCtx := handler.callsChannel.SendMultiRequest(&interfaces.SwInterfaceDump{}) @@ -59,9 +64,8 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*Interface, error) { return nil, fmt.Errorf("failed to dump interface: %v", err) } - iface := &Interface{ - VPPInternalName: string(bytes.SplitN(ifDetails.InterfaceName, []byte{0x00}, 2)[0]), - Interfaces_Interface: ifnb.Interfaces_Interface{ + details := &InterfaceDetails{ + Interface: &ifnb.Interfaces_Interface{ Name: string(bytes.SplitN(ifDetails.Tag, []byte{0x00}, 2)[0]), Type: guessInterfaceType(string(ifDetails.InterfaceName)), // the type may be amended later by further dumps Enabled: ifDetails.AdminUpDown > 0, @@ -74,15 +78,18 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*Interface, error) { return uint32(vppMtu) }(ifDetails.LinkMtu), }, + Meta: &InterfaceMeta{ + InternalName: string(bytes.SplitN(ifDetails.InterfaceName, []byte{0x00}, 2)[0]), + }, } // Fill name for physical interfaces (they are mostly without tag) - if iface.Type == ifnb.InterfaceType_ETHERNET_CSMACD { - iface.Name = iface.VPPInternalName + if details.Interface.Type == ifnb.InterfaceType_ETHERNET_CSMACD { + details.Interface.Name = details.Meta.InternalName } - ifs[ifDetails.SwIfIndex] = iface + ifs[ifDetails.SwIfIndex] = details - if iface.Type == ifnb.InterfaceType_AF_PACKET_INTERFACE { - fillAFPacketDetails(ifs, ifDetails.SwIfIndex, iface.VPPInternalName) + if details.Interface.Type == ifnb.InterfaceType_AF_PACKET_INTERFACE { + fillAFPacketDetails(ifs, ifDetails.SwIfIndex, details.Meta.InternalName) } } @@ -99,7 +106,7 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*Interface, error) { if err != nil { return nil, err } - ifs[idx].Vrf = vrfID + ifs[idx].Interface.Vrf = vrfID } timeLog = measure.GetTimeLog(ip.IPAddressDump{}, handler.stopwatch) @@ -159,7 +166,7 @@ func (handler *ifVppHandler) DumpMemifSocketDetails() (map[string]uint32, error) } // dumpIPAddressDetails dumps IP address details of interfaces from VPP and fills them into the provided interface map. -func (handler *ifVppHandler) dumpIPAddressDetails(ifs map[uint32]*Interface, isIPv6 uint8, timeLog measure.StopWatchEntry) error { +func (handler *ifVppHandler) dumpIPAddressDetails(ifs map[uint32]*InterfaceDetails, isIPv6 uint8, timeLog measure.StopWatchEntry) error { // TODO: workaround for incorrect ip.IPAddressDetails message notifChan := make(chan api.Message, 100) subs, _ := handler.callsChannel.SubscribeNotification(notifChan, ip.NewIPAddressDetails) @@ -201,13 +208,13 @@ func (handler *ifVppHandler) dumpIPAddressDetails(ifs map[uint32]*Interface, isI } // processIPDetails processes ip.IPAddressDetails binary API message and fills the details into the provided interface map. 
-func (handler *ifVppHandler) processIPDetails(ifs map[uint32]*Interface, ipDetails *ip.IPAddressDetails) { +func (handler *ifVppHandler) processIPDetails(ifs map[uint32]*InterfaceDetails, ipDetails *ip.IPAddressDetails) { _, ifIdxExists := ifs[ipDetails.SwIfIndex] if !ifIdxExists { return } - if ifs[ipDetails.SwIfIndex].IpAddresses == nil { - ifs[ipDetails.SwIfIndex].IpAddresses = make([]string, 0) + if ifs[ipDetails.SwIfIndex].Interface.IpAddresses == nil { + ifs[ipDetails.SwIfIndex].Interface.IpAddresses = make([]string, 0) } var ipAddr string if ipDetails.IsIpv6 == 1 { @@ -215,19 +222,19 @@ func (handler *ifVppHandler) processIPDetails(ifs map[uint32]*Interface, ipDetai } else { ipAddr = fmt.Sprintf("%s/%d", net.IP(ipDetails.IP[:4]).To4().String(), uint32(ipDetails.PrefixLength)) } - ifs[ipDetails.SwIfIndex].IpAddresses = append(ifs[ipDetails.SwIfIndex].IpAddresses, ipAddr) + ifs[ipDetails.SwIfIndex].Interface.IpAddresses = append(ifs[ipDetails.SwIfIndex].Interface.IpAddresses, ipAddr) } // fillAFPacketDetails fills af_packet interface details into the provided interface map. -func fillAFPacketDetails(ifs map[uint32]*Interface, swIfIndex uint32, ifName string) { - ifs[swIfIndex].Afpacket = &ifnb.Interfaces_Interface_Afpacket{ +func fillAFPacketDetails(ifs map[uint32]*InterfaceDetails, swIfIndex uint32, ifName string) { + ifs[swIfIndex].Interface.Afpacket = &ifnb.Interfaces_Interface_Afpacket{ HostIfName: strings.TrimPrefix(ifName, "host-"), } - ifs[swIfIndex].Type = ifnb.InterfaceType_AF_PACKET_INTERFACE + ifs[swIfIndex].Interface.Type = ifnb.InterfaceType_AF_PACKET_INTERFACE } // dumpMemifDetails dumps memif interface details from VPP and fills them into the provided interface map. -func (handler *ifVppHandler) dumpMemifDetails(ifs map[uint32]*Interface) error { +func (handler *ifVppHandler) dumpMemifDetails(ifs map[uint32]*InterfaceDetails) error { // MemifDetails time measurement defer func(t time.Time) { handler.stopwatch.TimeLog(memif.MemifDump{}).LogTimeEntry(time.Since(t)) @@ -253,7 +260,7 @@ func (handler *ifVppHandler) dumpMemifDetails(ifs map[uint32]*Interface) error { if !ifIdxExists { continue } - ifs[memifDetails.SwIfIndex].Memif = &ifnb.Interfaces_Interface_Memif{ + ifs[memifDetails.SwIfIndex].Interface.Memif = &ifnb.Interfaces_Interface_Memif{ Master: memifDetails.Role == 0, Mode: memifModetoNB(memifDetails.Mode), Id: memifDetails.ID, @@ -272,14 +279,14 @@ func (handler *ifVppHandler) dumpMemifDetails(ifs map[uint32]*Interface) error { BufferSize: uint32(memifDetails.BufferSize), // TODO: RxQueues, TxQueues - not available in the binary API } - ifs[memifDetails.SwIfIndex].Type = ifnb.InterfaceType_MEMORY_INTERFACE + ifs[memifDetails.SwIfIndex].Interface.Type = ifnb.InterfaceType_MEMORY_INTERFACE } return nil } // dumpTapDetails dumps tap interface details from VPP and fills them into the provided interface map. 
-func (handler *ifVppHandler) dumpTapDetails(ifs map[uint32]*Interface) error { +func (handler *ifVppHandler) dumpTapDetails(ifs map[uint32]*InterfaceDetails) error { // SwInterfaceTapDump time measurement defer func(t time.Time) { handler.stopwatch.TimeLog(tap.SwInterfaceTapDump{}).LogTimeEntry(time.Since(t)) @@ -300,11 +307,11 @@ func (handler *ifVppHandler) dumpTapDetails(ifs map[uint32]*Interface) error { if !ifIdxExists { continue } - ifs[tapDetails.SwIfIndex].Tap = &ifnb.Interfaces_Interface_Tap{ + ifs[tapDetails.SwIfIndex].Interface.Tap = &ifnb.Interfaces_Interface_Tap{ Version: 1, HostIfName: string(bytes.SplitN(tapDetails.DevName, []byte{0x00}, 2)[0]), } - ifs[tapDetails.SwIfIndex].Type = ifnb.InterfaceType_TAP_INTERFACE + ifs[tapDetails.SwIfIndex].Interface.Type = ifnb.InterfaceType_TAP_INTERFACE } // TAP v.2 @@ -322,20 +329,20 @@ func (handler *ifVppHandler) dumpTapDetails(ifs map[uint32]*Interface) error { if !ifIdxExists { continue } - ifs[tapDetails.SwIfIndex].Tap = &ifnb.Interfaces_Interface_Tap{ + ifs[tapDetails.SwIfIndex].Interface.Tap = &ifnb.Interfaces_Interface_Tap{ Version: 2, HostIfName: string(bytes.SplitN(tapDetails.HostIfName, []byte{0x00}, 2)[0]), // Other parameters are not not yet part of the dump. } - ifs[tapDetails.SwIfIndex].Type = ifnb.InterfaceType_TAP_INTERFACE + ifs[tapDetails.SwIfIndex].Interface.Type = ifnb.InterfaceType_TAP_INTERFACE } return nil } // dumpVxlanDetails dumps VXLAN interface details from VPP and fills them into the provided interface map. -func (handler *ifVppHandler) dumpVxlanDetails(ifs map[uint32]*Interface) error { +func (handler *ifVppHandler) dumpVxlanDetails(ifs map[uint32]*InterfaceDetails) error { // VxlanTunnelDump time measurement defer func(t time.Time) { handler.stopwatch.TimeLog(vxlan.VxlanTunnelDump{}).LogTimeEntry(time.Since(t)) @@ -359,25 +366,25 @@ func (handler *ifVppHandler) dumpVxlanDetails(ifs map[uint32]*Interface) error { var multicastIfName string _, exists := ifs[vxlanDetails.McastSwIfIndex] if exists { - multicastIfName = ifs[vxlanDetails.McastSwIfIndex].Name + multicastIfName = ifs[vxlanDetails.McastSwIfIndex].Interface.Name } if vxlanDetails.IsIpv6 == 1 { - ifs[vxlanDetails.SwIfIndex].Vxlan = &ifnb.Interfaces_Interface_Vxlan{ + ifs[vxlanDetails.SwIfIndex].Interface.Vxlan = &ifnb.Interfaces_Interface_Vxlan{ Multicast: multicastIfName, SrcAddress: net.IP(vxlanDetails.SrcAddress).To16().String(), DstAddress: net.IP(vxlanDetails.DstAddress).To16().String(), Vni: vxlanDetails.Vni, } } else { - ifs[vxlanDetails.SwIfIndex].Vxlan = &ifnb.Interfaces_Interface_Vxlan{ + ifs[vxlanDetails.SwIfIndex].Interface.Vxlan = &ifnb.Interfaces_Interface_Vxlan{ Multicast: multicastIfName, SrcAddress: net.IP(vxlanDetails.SrcAddress[:4]).To4().String(), DstAddress: net.IP(vxlanDetails.DstAddress[:4]).To4().String(), Vni: vxlanDetails.Vni, } } - ifs[vxlanDetails.SwIfIndex].Type = ifnb.InterfaceType_VXLAN_TUNNEL + ifs[vxlanDetails.SwIfIndex].Interface.Type = ifnb.InterfaceType_VXLAN_TUNNEL } return nil diff --git a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls_test.go index 08a299b094..ab319ce088 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls_test.go @@ -379,7 +379,7 @@ func TestDumpInterfacesVxLan(t *testing.T) { intfs, err := ifHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) - intface := intfs[0] + intface := intfs[0].Interface // Check vxlan 
Expect(intface.Vxlan.SrcAddress).To(Equal("dead:beef:feed:face:cafe:babe:baad:c0de")) @@ -439,7 +439,7 @@ func TestDumpInterfacesHost(t *testing.T) { intfs, err := ifHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) - intface := intfs[0] + intface := intfs[0].Interface // Check interface data Expect(intface.Afpacket.HostIfName).To(Equal("localhost")) @@ -509,7 +509,7 @@ func TestDumpInterfacesMemif(t *testing.T) { intfs, err := ifHandler.DumpInterfaces() Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) - intface := intfs[0] + intface := intfs[0].Interface // Check memif Expect(intface.Memif.SocketFilename).To(Equal("test")) @@ -605,7 +605,7 @@ func TestDumpInterfacesFull(t *testing.T) { Expect(err).To(BeNil()) Expect(intfs).To(HaveLen(1)) - intface := intfs[0] + intface := intfs[0].Interface // This is last checked type, so it will be equal to that Expect(intface.Type).To(Equal(interfaces2.InterfaceType_VXLAN_TUNNEL)) diff --git a/plugins/vpp/l2plugin/bd_config.go b/plugins/vpp/l2plugin/bd_config.go index 1b37b40405..3857db1b23 100644 --- a/plugins/vpp/l2plugin/bd_config.go +++ b/plugins/vpp/l2plugin/bd_config.go @@ -107,7 +107,7 @@ func (plugin *BDConfigurator) Init(logger logging.PluginLogger, goVppMux govppmu return err } - if plugin.bdHandler, err = vppcalls.NewBridgeDomainVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + if plugin.bdHandler, err = vppcalls.NewBridgeDomainVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.log, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go index 46b4037772..ba994ae227 100644 --- a/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/bridge_domain_vppcalls_test.go @@ -18,7 +18,9 @@ import ( "testing" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/vpp-agent/idxvpp/nametoidx" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" "github.com/ligato/vpp-agent/tests/vppcallmock" @@ -84,7 +86,7 @@ var deleteTestDataOutBd *l2ba.BridgeDomainAddDel = &l2ba.BridgeDomainAddDel{ } func TestVppAddBridgeDomain(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) @@ -95,7 +97,7 @@ func TestVppAddBridgeDomain(t *testing.T) { } func TestVppAddBridgeDomainError(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{Retval: 1}) @@ -109,7 +111,7 @@ func TestVppAddBridgeDomainError(t *testing.T) { } func TestVppDeleteBridgeDomain(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) @@ -120,7 +122,7 @@ func TestVppDeleteBridgeDomain(t *testing.T) { } func TestVppDeleteBridgeDomainError(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{Retval: 1}) @@ -133,10 +135,11 @@ func TestVppDeleteBridgeDomainError(t *testing.T) { Expect(err).Should(HaveOccurred()) } -func bdTestSetup(t *testing.T) 
(*vppcallmock.TestCtx, vppcalls.BridgeDomainVppAPI) { +func bdTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.BridgeDomainVppAPI, ifaceidx.SwIfIndexRW) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - bdHandler, err := vppcalls.NewBridgeDomainVppHandler(ctx.MockChannel, log, nil) + ifIndex := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "bd-test-ifidx", nil)) + bdHandler, err := vppcalls.NewBridgeDomainVppHandler(ctx.MockChannel, ifIndex, log, nil) Expect(err).To(BeNil()) - return ctx, bdHandler + return ctx, bdHandler, ifIndex } From 4c6b69bd47707e5187b0e8b75a1c123e2544a2a6 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 25 Jul 2018 12:38:40 +0200 Subject: [PATCH 048/174] improved dump for fibs and xconnects Signed-off-by: Vladimir Lavor --- .../vppcalls/dump_interface_vppcalls.go | 2 + plugins/vpp/l2plugin/data_resync.go | 34 ++-- plugins/vpp/l2plugin/fib_config.go | 3 +- plugins/vpp/l2plugin/vppcalls/api_vppcalls.go | 22 ++- .../vppcalls/arp_term_vppcalls_test.go | 12 +- .../vpp/l2plugin/vppcalls/dump_vppcalls.go | 177 ++++++++++++------ .../l2plugin/vppcalls/dump_vppcalls_test.go | 123 +++++++++--- .../vppcalls/interface_vppcalls_test.go | 20 +- .../l2plugin/vppcalls/l2fib_vppcalls_test.go | 21 ++- .../vppcalls/xconnect_vppcalls_test.go | 13 +- plugins/vpp/l2plugin/xconnect_config.go | 2 +- plugins/vpp/l3plugin/route_config.go | 2 +- 12 files changed, 290 insertions(+), 141 deletions(-) diff --git a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go index 1f21ca103d..c8f766f505 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go @@ -43,6 +43,7 @@ type InterfaceDetails struct { // InterfaceMeta is combination of proto-modelled Interface data and VPP provided metadata type InterfaceMeta struct { + Tag string `json:"tag"` InternalName string `json:"internal_name"` } @@ -79,6 +80,7 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*InterfaceDetails, err }(ifDetails.LinkMtu), }, Meta: &InterfaceMeta{ + Tag: string(bytes.SplitN(ifDetails.Tag, []byte{0x00}, 2)[0]), InternalName: string(bytes.SplitN(ifDetails.InterfaceName, []byte{0x00}, 2)[0]), }, } diff --git a/plugins/vpp/l2plugin/data_resync.go b/plugins/vpp/l2plugin/data_resync.go index a3747a0d4d..7777763434 100644 --- a/plugins/vpp/l2plugin/data_resync.go +++ b/plugins/vpp/l2plugin/data_resync.go @@ -44,7 +44,7 @@ func (plugin *BDConfigurator) Resync(nbBDs []*l2.BridgeDomains_BridgeDomain) err var wasErr error for vppBDIdx, vppBD := range vppBDs { // tag is bridge domain name (unique identifier) - tag := vppBD.Name + tag := vppBD.Bd.Name // Find NB bridge domain with the same name var nbBD *l2.BridgeDomains_BridgeDomain for _, nbBDConfig := range nbBDs { @@ -67,12 +67,12 @@ func (plugin *BDConfigurator) Resync(nbBDs []*l2.BridgeDomains_BridgeDomain) err // Bridge domain exists, validate valid, recreate := plugin.vppValidateBridgeDomainBVI(nbBD, &l2.BridgeDomains_BridgeDomain{ Name: tag, - Learn: vppBD.Learn, - Flood: vppBD.Flood, - Forward: vppBD.Forward, - UnknownUnicastFlood: vppBD.UnknownUnicastFlood, - ArpTermination: vppBD.ArpTermination, - MacAge: vppBD.MacAge, + Learn: vppBD.Bd.Learn, + Flood: vppBD.Bd.Flood, + Forward: vppBD.Bd.Forward, + UnknownUnicastFlood: vppBD.Bd.UnknownUnicastFlood, + ArpTermination: vppBD.Bd.ArpTermination, + MacAge: vppBD.Bd.MacAge, }) if !valid { plugin.log.Errorf("RESYNC bridge domain: new config %v 
is invalid", nbBD.Name) @@ -106,7 +106,7 @@ func (plugin *BDConfigurator) Resync(nbBDs []*l2.BridgeDomains_BridgeDomain) err var interfacesToUnset []*l2.BridgeDomains_BridgeDomain_Interfaces for _, iface := range interfaceMap { interfacesToUnset = append(interfacesToUnset, &l2.BridgeDomains_BridgeDomain_Interfaces{ - Name: iface.Name, + Name: iface.Interface.Name, }) } // Remove interfaces from bridge domain. Attempt to unset interface which does not belong to the bridge domain @@ -183,20 +183,20 @@ func (plugin *FIBConfigurator) Resync(nbFIBs []*l2.FibTable_FibEntry) error { } // Bridge domain bdIdx, _, found := plugin.bdIndexes.LookupIdx(nbFIB.BridgeDomain) - if !found || vppFIBdata.BridgeDomainIdx != bdIdx { + if !found || vppFIBdata.Meta.BdID != bdIdx { continue } // BVI - if vppFIBdata.BridgedVirtualInterface != nbFIB.BridgedVirtualInterface { + if vppFIBdata.Fib.BridgedVirtualInterface != nbFIB.BridgedVirtualInterface { continue } // Interface swIdx, _, found := plugin.ifIndexes.LookupIdx(nbFIB.OutgoingInterface) - if !found || vppFIBdata.OutgoingInterfaceSwIfIdx != swIdx { + if !found || vppFIBdata.Meta.IfIdx != swIdx { continue } // Is static - if vppFIBdata.StaticConfig != nbFIB.StaticConfig { + if vppFIBdata.Fib.StaticConfig != nbFIB.StaticConfig { continue } @@ -209,10 +209,10 @@ func (plugin *FIBConfigurator) Resync(nbFIBs []*l2.FibTable_FibEntry) error { if exists { plugin.fibIndexes.RegisterName(vppFIBmac, plugin.fibIndexSeq, meta) plugin.fibIndexSeq++ - } else if vppFIBdata.StaticConfig { + } else if vppFIBdata.Fib.StaticConfig { // Get appropriate interface/bridge domain names - ifIdx, _, ifFound := plugin.ifIndexes.LookupName(vppFIBdata.OutgoingInterfaceSwIfIdx) - bdIdx, _, bdFound := plugin.bdIndexes.LookupName(vppFIBdata.BridgeDomainIdx) + ifIdx, _, ifFound := plugin.ifIndexes.LookupName(vppFIBdata.Meta.IfIdx) + bdIdx, _, bdFound := plugin.bdIndexes.LookupName(vppFIBdata.Meta.BdID) if !ifFound || !bdFound { // FIB entry cannot be removed without these informations and // it should be removed by the VPP @@ -275,8 +275,8 @@ func (plugin *XConnectConfigurator) Resync(nbXConns []*l2.XConnectPairs_XConnect var rxIfName, txIfName string for _, nbXConn := range nbXConns { // Find receive and transmit interface - rxIfName, _, rxIfFound := plugin.ifIndexes.LookupName(vppXConn.ReceiveInterfaceSwIfIdx) - txIfName, _, txIfFound := plugin.ifIndexes.LookupName(vppXConn.TransmitInterfaceSwIfIdx) + rxIfName, _, rxIfFound := plugin.ifIndexes.LookupName(vppXConn.Meta.ReceiveInterfaceSwIfIdx) + txIfName, _, txIfFound := plugin.ifIndexes.LookupName(vppXConn.Meta.TransmitInterfaceSwIfIdx) if !rxIfFound || !txIfFound { continue } diff --git a/plugins/vpp/l2plugin/fib_config.go b/plugins/vpp/l2plugin/fib_config.go index 18508e3e00..d6dd5efad7 100644 --- a/plugins/vpp/l2plugin/fib_config.go +++ b/plugins/vpp/l2plugin/fib_config.go @@ -93,7 +93,8 @@ func (plugin *FIBConfigurator) Init(logger logging.PluginLogger, goVppMux govppm // VPP calls helper object requestChan := make(chan *vppcalls.FibLogicalReq) - if plugin.fibHandler, err = vppcalls.NewFibVppHandler(plugin.syncChannel, plugin.asyncChannel, requestChan, plugin.log, plugin.stopwatch); err != nil { + if plugin.fibHandler, err = vppcalls.NewFibVppHandler(plugin.syncChannel, plugin.asyncChannel, requestChan, plugin.ifIndexes, + plugin.bdIndexes, plugin.log, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/l2plugin/vppcalls/api_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/api_vppcalls.go index 0f3a2a41e1..6950b15534 
100644 --- a/plugins/vpp/l2plugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/api_vppcalls.go @@ -19,6 +19,7 @@ import ( "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) @@ -60,7 +61,7 @@ type BridgeDomainVppRead interface { // LIMITATIONS: // - not able to dump ArpTerminationTable - missing binary API // - DumpBridgeDomains() (map[uint32]*BridgeDomain, error) + DumpBridgeDomains() (map[uint32]*BridgeDomainDetails, error) } // FibVppAPI provides methods for managing FIBs @@ -81,7 +82,7 @@ type FibVppWrite interface { type FibVppRead interface { // DumpFIBTableEntries dumps VPP FIB table entries into the northbound API data structure // map indexed by destination MAC address. - DumpFIBTableEntries() (map[string]*FIBTableEntry, error) + DumpFIBTableEntries() (map[string]*FibTableDetails, error) // WatchFIBReplies handles L2 FIB add/del requests WatchFIBReplies() } @@ -104,13 +105,14 @@ type XConnectVppWrite interface { type XConnectVppRead interface { // DumpXConnectPairs dumps VPP xconnect pair data into the northbound API data structure // map indexed by rx interface index. - DumpXConnectPairs() (map[uint32]*XConnectPairs, error) + DumpXConnectPairs() (map[uint32]*XConnectDetails, error) } // bridgeDomainVppHandler is accessor for bridge domain-related vppcalls methods type bridgeDomainVppHandler struct { stopwatch *measure.Stopwatch callsChannel govppapi.Channel + ifIndexes ifaceidx.SwIfIndex log logging.Logger } @@ -120,6 +122,8 @@ type fibVppHandler struct { syncCallsChannel govppapi.Channel asyncCallsChannel govppapi.Channel requestChan chan *FibLogicalReq + ifIndexes ifaceidx.SwIfIndex + bdIndexes l2idx.BDIndex log logging.Logger } @@ -127,14 +131,16 @@ type fibVppHandler struct { type xConnectVppHandler struct { stopwatch *measure.Stopwatch callsChannel govppapi.Channel + ifIndexes ifaceidx.SwIfIndex log logging.Logger } // NewBridgeDomainVppHandler creates new instance of bridge domain vppcalls handler -func NewBridgeDomainVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*bridgeDomainVppHandler, error) { +func NewBridgeDomainVppHandler(callsChan govppapi.Channel, ifIndexes ifaceidx.SwIfIndex, log logging.Logger, stopwatch *measure.Stopwatch) (*bridgeDomainVppHandler, error) { handler := &bridgeDomainVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, + ifIndexes: ifIndexes, log: log, } if err := handler.callsChannel.CheckMessageCompatibility(BridgeDomainMessages...); err != nil { @@ -145,12 +151,15 @@ func NewBridgeDomainVppHandler(callsChan govppapi.Channel, log logging.Logger, s } // NewFibVppHandler creates new instance of FIB vppcalls handler -func NewFibVppHandler(syncChan, asyncChan govppapi.Channel, reqChan chan *FibLogicalReq, log logging.Logger, stopwatch *measure.Stopwatch) (*fibVppHandler, error) { +func NewFibVppHandler(syncChan, asyncChan govppapi.Channel, reqChan chan *FibLogicalReq, ifIndexes ifaceidx.SwIfIndex, bdIndexes l2idx.BDIndex, + log logging.Logger, stopwatch *measure.Stopwatch) (*fibVppHandler, error) { handler := &fibVppHandler{ syncCallsChannel: syncChan, asyncCallsChannel: asyncChan, requestChan: reqChan, stopwatch: stopwatch, + ifIndexes: ifIndexes, + bdIndexes: bdIndexes, log: log, } if err := handler.syncCallsChannel.CheckMessageCompatibility(L2FibMessages...); err != nil { @@ -161,10 
+170,11 @@ func NewFibVppHandler(syncChan, asyncChan govppapi.Channel, reqChan chan *FibLog } // NewXConnectVppHandler creates new instance of cross connect vppcalls handler -func NewXConnectVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*xConnectVppHandler, error) { +func NewXConnectVppHandler(callsChan govppapi.Channel, ifIndexes ifaceidx.SwIfIndex, log logging.Logger, stopwatch *measure.Stopwatch) (*xConnectVppHandler, error) { handler := &xConnectVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, + ifIndexes: ifIndexes, log: log, } if err := handler.callsChannel.CheckMessageCompatibility(XConnectMessages...); err != nil { diff --git a/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go index 1a4a4cb114..f4cd3335dc 100644 --- a/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/arp_term_vppcalls_test.go @@ -22,7 +22,7 @@ import ( ) func TestVppAddArpTerminationTableEntry(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) @@ -41,7 +41,7 @@ func TestVppAddArpTerminationTableEntry(t *testing.T) { } func TestVppAddArpTerminationTableEntryIPv6(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) @@ -59,7 +59,7 @@ func TestVppAddArpTerminationTableEntryIPv6(t *testing.T) { } func TestVppRemoveArpTerminationTableEntry(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) @@ -77,7 +77,7 @@ func TestVppRemoveArpTerminationTableEntry(t *testing.T) { } func TestVppArpTerminationTableEntryMacError(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) @@ -90,7 +90,7 @@ func TestVppArpTerminationTableEntryMacError(t *testing.T) { } func TestVppArpTerminationTableEntryIpError(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{}) @@ -103,7 +103,7 @@ func TestVppArpTerminationTableEntryIpError(t *testing.T) { } func TestVppArpTerminationTableEntryError(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2ba.BdIPMacAddDelReply{ diff --git a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go index de4882ba3b..fa2ad1c72e 100644 --- a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go @@ -23,49 +23,26 @@ import ( l2nb "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) -func (handler *bridgeDomainVppHandler) DumpBridgeDomainIDs() ([]uint32, error) { - defer func(t time.Time) { - handler.stopwatch.TimeLog(l2ba.BridgeDomainDump{}).LogTimeEntry(time.Since(t)) - }(time.Now()) - - req := &l2ba.BridgeDomainDump{BdID: ^uint32(0)} - activeDomains := make([]uint32, 1) - reqCtx := handler.callsChannel.SendMultiRequest(req) - for { - msg := &l2ba.BridgeDomainDetails{} - stop, err := reqCtx.ReceiveReply(msg) - if err != nil { - return nil, err - } - if stop { - break - } - activeDomains = append(activeDomains, msg.BdID) - } - - 
return activeDomains, nil
-}
-
-// BridgeDomain is the wrapper structure for the bridge domain northbound API structure.
-// NOTE: Interfaces in BridgeDomains_BridgeDomain is overridden by the local Interfaces member.
+// BridgeDomainDetails is the wrapper structure for the bridge domain northbound API structure.
-type BridgeDomain struct {
-	Interfaces []*BridgeDomainInterface `json:"interfaces"`
-	l2nb.BridgeDomains_BridgeDomain
+type BridgeDomainDetails struct {
+	Bd   *l2nb.BridgeDomains_BridgeDomain `json:"bridge_domain"`
+	Meta *BridgeDomainMeta
 }
 
-// BridgeDomainInterface is the wrapper structure for the bridge domain interface northbound API structure.
-type BridgeDomainInterface struct {
-	SwIfIndex uint32 `json:"sw_if_index"`
-	l2nb.BridgeDomains_BridgeDomain_Interfaces
+// BridgeDomainMeta contains bridge domain interface name/index map
+type BridgeDomainMeta struct {
+	BdID          uint32
+	BdIfIdxToName map[uint32]string
 }
 
-func (handler *bridgeDomainVppHandler) DumpBridgeDomains() (map[uint32]*BridgeDomain, error) {
+func (handler *bridgeDomainVppHandler) DumpBridgeDomains() (map[uint32]*BridgeDomainDetails, error) {
 	defer func(t time.Time) {
 		handler.stopwatch.TimeLog(l2ba.BridgeDomainDump{}).LogTimeEntry(time.Since(t))
 	}(time.Now())
 
 	// map for the resulting BDs
-	bds := make(map[uint32]*BridgeDomain)
+	bds := make(map[uint32]*BridgeDomainDetails)
 
 	// First, dump all interfaces to create initial data.
 	reqCtx := handler.callsChannel.SendMultiRequest(&l2ba.BridgeDomainDump{BdID: ^uint32(0)})
@@ -80,10 +57,9 @@ func (handler *bridgeDomainVppHandler) DumpBridgeDo
 			return nil, err
 		}
 
-		// bridge domain details
-		bds[bdDetails.BdID] = &BridgeDomain{
-			Interfaces: []*BridgeDomainInterface{},
-			BridgeDomains_BridgeDomain: l2nb.BridgeDomains_BridgeDomain{
+		// base bridge domain details
+		bds[bdDetails.BdID] = &BridgeDomainDetails{
+			Bd: &l2nb.BridgeDomains_BridgeDomain{
 				Name:                string(bytes.Replace(bdDetails.BdTag, []byte{0x00}, []byte{}, -1)),
 				Flood:               bdDetails.Flood > 0,
 				UnknownUnicastFlood: bdDetails.UuFlood > 0,
@@ -92,34 +68,80 @@ func (handler *bridgeDomainVppHandler) DumpBridgeDo
 				ArpTermination:      bdDetails.ArpTerm > 0,
 				MacAge:              uint32(bdDetails.MacAge),
 			},
+			Meta: &BridgeDomainMeta{
+				BdID:          bdDetails.BdID,
+				BdIfIdxToName: make(map[uint32]string),
+			},
 		}
 
-		// bridge domain interfaces
+		// bridge domain interfaces and metadata
 		for _, iface := range bdDetails.SwIfDetails {
-			bds[bdDetails.BdID].Interfaces = append(bds[bdDetails.BdID].Interfaces, &BridgeDomainInterface{
-				SwIfIndex: iface.SwIfIndex,
+			ifName, _, exists := handler.ifIndexes.LookupName(iface.SwIfIndex)
+			if !exists {
+				handler.log.Warnf("Bridge domain dump: interface name for index %d not found", iface.SwIfIndex)
+				continue
+			}
+			// Bvi
+			var bvi bool
+			if iface.SwIfIndex == bdDetails.BviSwIfIndex {
+				bvi = true
+			}
+			// Add metadata entry
+			bds[bdDetails.BdID].Meta.BdIfIdxToName[iface.SwIfIndex] = ifName
+			// Add interface entry
+			bds[bdDetails.BdID].Bd.Interfaces = append(bds[bdDetails.BdID].Bd.Interfaces, &l2nb.BridgeDomains_BridgeDomain_Interfaces{
+				Name: ifName,
+				BridgedVirtualInterface: bvi,
+				SplitHorizonGroup:       uint32(iface.Shg),
 			})
 		}
-
 	}
 
 	return bds, nil
 }
 
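The Bd/Meta split above keeps proto-modelled configuration separate from VPP-only metadata. A fragment sketch of consuming the new dump, assuming an initialized handler and an enclosing function that returns an error:

    bds, err := handler.DumpBridgeDomains()
    if err != nil {
    	return err
    }
    for bdID, details := range bds {
    	// Proto-modelled config lives under Bd, VPP-side metadata under Meta.
    	fmt.Printf("BD %d (%s): %d interfaces\n", bdID, details.Bd.Name, len(details.Bd.Interfaces))
    	for swIfIdx, ifName := range details.Meta.BdIfIdxToName {
    		fmt.Printf("  sw_if_index %d -> %s\n", swIfIdx, ifName)
    	}
    }

-// FIBTableEntry is the wrapper structure for the FIB table entry northbound API structure.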
-type FIBTableEntry struct { - BridgeDomainIdx uint32 `json:"bridge_domain_idx"` - OutgoingInterfaceSwIfIdx uint32 `json:"outgoing_interface_sw_if_idx"` - l2nb.FibTable_FibEntry +func (handler *bridgeDomainVppHandler) DumpBridgeDomainIDs() ([]uint32, error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(l2ba.BridgeDomainDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + req := &l2ba.BridgeDomainDump{BdID: ^uint32(0)} + activeDomains := make([]uint32, 1) + reqCtx := handler.callsChannel.SendMultiRequest(req) + for { + msg := &l2ba.BridgeDomainDetails{} + stop, err := reqCtx.ReceiveReply(msg) + if err != nil { + return nil, err + } + if stop { + break + } + activeDomains = append(activeDomains, msg.BdID) + } + + return activeDomains, nil } -func (handler *fibVppHandler) DumpFIBTableEntries() (map[string]*FIBTableEntry, error) { +// FIBTableDetails is the wrapper structure for the FIB table entry northbound API structure. +type FibTableDetails struct { + Fib *l2nb.FibTable_FibEntry + Meta *FibMeta +} + +// FibMeta contains FIB interface and bridge domain name/index map +type FibMeta struct { + BdID uint32 `json:"bridge_domain_idx"` + IfIdx uint32 `json:"outgoing_interface_sw_if_idx"` +} + +func (handler *fibVppHandler) DumpFIBTableEntries() (map[string]*FibTableDetails, error) { defer func(t time.Time) { handler.stopwatch.TimeLog(l2ba.L2FibTableDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) // map for the resulting FIBs - fibs := make(map[string]*FIBTableEntry) + fibs := make(map[string]*FibTableDetails) reqCtx := handler.syncCallsChannel.SendMultiRequest(&l2ba.L2FibTableDump{BdID: ^uint32(0)}) for { @@ -140,34 +162,55 @@ func (handler *fibVppHandler) DumpFIBTableEntries() (map[string]*FIBTableEntry, action = l2nb.FibTable_FibEntry_FORWARD } - fibs[mac] = &FIBTableEntry{ - BridgeDomainIdx: uint32(fibDetails.BdID), - OutgoingInterfaceSwIfIdx: fibDetails.SwIfIndex, - FibTable_FibEntry: l2nb.FibTable_FibEntry{ + // Interface name + ifName, _, exists := handler.ifIndexes.LookupName(fibDetails.SwIfIndex) + if !exists { + handler.log.Warnf("FIB dump: interface name for index %s not found", fibDetails.SwIfIndex) + } + // Bridge domain name + bdName, _, exists := handler.bdIndexes.LookupName(fibDetails.BdID) + if !exists { + handler.log.Warnf("FIB dump: bridge domain name for index %s not found", fibDetails.BdID) + } + + fibs[mac] = &FibTableDetails{ + Fib: &l2nb.FibTable_FibEntry{ PhysAddress: mac, + BridgeDomain: bdName, Action: action, + OutgoingInterface: ifName, StaticConfig: fibDetails.StaticMac > 0, BridgedVirtualInterface: fibDetails.BviMac > 0, }, + Meta: &FibMeta{ + BdID: fibDetails.BdID, + IfIdx: fibDetails.SwIfIndex, + }, } } return fibs, nil } -// XConnectPairs is the wrapper structure for the l2 xconnect northbound API structure. -type XConnectPairs struct { +// XConnectDetails is the wrapper structure for the l2 xconnect northbound API structure. 
+type XConnectDetails struct { + Xc *l2nb.XConnectPairs_XConnectPair + Meta *XcMeta +} + +// XcMeta contains cross connect rx/tx interface indexes +type XcMeta struct { ReceiveInterfaceSwIfIdx uint32 `json:"receive_interface_sw_if_idx"` TransmitInterfaceSwIfIdx uint32 `json:"transmit_interface_sw_if_idx"` } -func (handler *xConnectVppHandler) DumpXConnectPairs() (map[uint32]*XConnectPairs, error) { +func (handler *xConnectVppHandler) DumpXConnectPairs() (map[uint32]*XConnectDetails, error) { defer func(t time.Time) { handler.stopwatch.TimeLog(l2ba.L2XconnectDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) // map for the resulting xconnect pairs - xpairs := make(map[uint32]*XConnectPairs) + xpairs := make(map[uint32]*XConnectDetails) reqCtx := handler.callsChannel.SendMultiRequest(&l2ba.L2XconnectDump{}) for { @@ -180,9 +223,25 @@ func (handler *xConnectVppHandler) DumpXConnectPairs() (map[uint32]*XConnectPair return nil, err } - xpairs[pairs.RxSwIfIndex] = &XConnectPairs{ - ReceiveInterfaceSwIfIdx: pairs.RxSwIfIndex, - TransmitInterfaceSwIfIdx: pairs.TxSwIfIndex, + // Find interface names + rxIfaceName, _, exists := handler.ifIndexes.LookupName(pairs.RxSwIfIndex) + if !exists { + handler.log.Warnf("XConnect dump: rx interface name for index %s not found", pairs.RxSwIfIndex) + } + txIfaceName, _, exists := handler.ifIndexes.LookupName(pairs.TxSwIfIndex) + if !exists { + handler.log.Warnf("XConnect dump: tx interface name for index %s not found", pairs.TxSwIfIndex) + } + + xpairs[pairs.RxSwIfIndex] = &XConnectDetails{ + Xc: &l2nb.XConnectPairs_XConnectPair{ + ReceiveInterface: rxIfaceName, + TransmitInterface: txIfaceName, + }, + Meta: &XcMeta{ + ReceiveInterfaceSwIfIdx: pairs.RxSwIfIndex, + TransmitInterfaceSwIfIdx: pairs.TxSwIfIndex, + }, } } diff --git a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go index f65a2289e2..08e307af00 100644 --- a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go @@ -44,31 +44,57 @@ var testDataInMessagesBDs = []govppapi.Message{ }, } -var testDataOutMessage = []*vppcalls.BridgeDomain{ +var testDataOutMessage = []*vppcalls.BridgeDomainDetails{ { - Interfaces: []*vppcalls.BridgeDomainInterface{ - {SwIfIndex: 5}, - {SwIfIndex: 7}, - }, - BridgeDomains_BridgeDomain: l2nb.BridgeDomains_BridgeDomain{ + Bd: &l2nb.BridgeDomains_BridgeDomain{ Flood: true, UnknownUnicastFlood: true, Forward: true, Learn: true, ArpTermination: true, - MacAge: 140}, - }, { - Interfaces: []*vppcalls.BridgeDomainInterface{ - {SwIfIndex: 5}, - {SwIfIndex: 8}, + MacAge: 140, + Interfaces: []*l2nb.BridgeDomains_BridgeDomain_Interfaces{ + { + Name: "if1", + }, + { + Name: "if2", + }, + }, }, - BridgeDomains_BridgeDomain: l2nb.BridgeDomains_BridgeDomain{ + Meta: &vppcalls.BridgeDomainMeta{ + BdIfIdxToName: func() map[uint32]string { + meta := make(map[uint32]string) + meta[5] = "if1" + meta[7] = "if2" + return meta + }(), + }, + }, { + Bd: &l2nb.BridgeDomains_BridgeDomain{ Flood: false, UnknownUnicastFlood: false, Forward: false, Learn: false, ArpTermination: false, - MacAge: 141}, + MacAge: 141, + Interfaces: []*l2nb.BridgeDomains_BridgeDomain_Interfaces{ + { + Name: "if1", + }, + { + Name: "if3", + }, + }, + }, + Meta: &vppcalls.BridgeDomainMeta{ + BdIfIdxToName: func() map[uint32]string { + meta := make(map[uint32]string) + meta[5] = "if1" + meta[8] = "if3" + return meta + }(), + }, }, } @@ -76,9 +102,12 @@ var testDataOutMessage = []*vppcalls.BridgeDomain{ // - 2 bridge 
domains + 1 default in VPP // TestDumpBridgeDomainIDs tests DumpBridgeDomainIDs method func TestDumpBridgeDomainIDs(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, ifIndexes := bdTestSetup(t) defer ctx.TeardownTestCtx() + ifIndexes.RegisterName("if1", 5, nil) + ifIndexes.RegisterName("if2", 7, nil) + ctx.MockVpp.MockReply(testDataInMessagesBDs...) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) @@ -96,9 +125,13 @@ func TestDumpBridgeDomainIDs(t *testing.T) { // - 2 bridge domains + 1 default in VPP // TestDumpBridgeDomains tests DumpBridgeDomains method func TestDumpBridgeDomains(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, ifIndexes := bdTestSetup(t) defer ctx.TeardownTestCtx() + ifIndexes.RegisterName("if1", 5, nil) + ifIndexes.RegisterName("if2", 7, nil) + ifIndexes.RegisterName("if3", 8, nil) + ctx.MockVpp.MockReply(testDataInMessagesBDs...) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) @@ -127,25 +160,33 @@ var testDataInMessagesFIBs = []govppapi.Message{ }, } -var testDataOutFIBs = []*vppcalls.FIBTableEntry{ +var testDataOutFIBs = []*vppcalls.FibTableDetails{ { - BridgeDomainIdx: 10, - OutgoingInterfaceSwIfIdx: 1, - FibTable_FibEntry: l2nb.FibTable_FibEntry{ + Fib: &l2nb.FibTable_FibEntry{ PhysAddress: "aa:aa:aa:aa:aa:aa", + BridgeDomain: "bd1", Action: l2nb.FibTable_FibEntry_DROP, StaticConfig: true, BridgedVirtualInterface: true, + OutgoingInterface: "if1", + }, + Meta: &vppcalls.FibMeta{ + BdID: 10, + IfIdx: 1, }, }, { - BridgeDomainIdx: 20, - OutgoingInterfaceSwIfIdx: 2, - FibTable_FibEntry: l2nb.FibTable_FibEntry{ + Fib: &l2nb.FibTable_FibEntry{ PhysAddress: "bb:bb:bb:bb:bb:bb", + BridgeDomain: "bd2", Action: l2nb.FibTable_FibEntry_FORWARD, StaticConfig: false, BridgedVirtualInterface: false, + OutgoingInterface: "if2", + }, + Meta: &vppcalls.FibMeta{ + BdID: 20, + IfIdx: 2, }, }, } @@ -154,9 +195,14 @@ var testDataOutFIBs = []*vppcalls.FIBTableEntry{ // - 2 FIB entries in VPP // TestDumpFIBTableEntries tests DumpFIBTableEntries method func TestDumpFIBTableEntries(t *testing.T) { - ctx, fibHandler := fibTestSetup(t) + ctx, fibHandler, ifIndexes, bdIndexes := fibTestSetup(t) defer ctx.TeardownTestCtx() + ifIndexes.RegisterName("if1", 1, nil) + ifIndexes.RegisterName("if2", 2, nil) + bdIndexes.RegisterName("bd1", 10, nil) + bdIndexes.RegisterName("bd2", 20, nil) + ctx.MockVpp.MockReply(testDataInMessagesFIBs...) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) @@ -176,18 +222,41 @@ var testDataInXConnect = []govppapi.Message{ &l2ba.L2XconnectDetails{3, 4}, } -var testDataOutXconnect = []*vppcalls.XConnectPairs{ - {1, 2}, - {3, 4}, +var testDataOutXconnect = []*vppcalls.XConnectDetails{ + { + Xc: &l2nb.XConnectPairs_XConnectPair{ + ReceiveInterface: "if1", + TransmitInterface: "if2", + }, + Meta: &vppcalls.XcMeta{ + ReceiveInterfaceSwIfIdx: 1, + TransmitInterfaceSwIfIdx: 2, + }, + }, + { + Xc: &l2nb.XConnectPairs_XConnectPair{ + ReceiveInterface: "if3", + TransmitInterface: "if4", + }, + Meta: &vppcalls.XcMeta{ + ReceiveInterfaceSwIfIdx: 3, + TransmitInterfaceSwIfIdx: 4, + }, + }, } // Scenario: // - 2 Xconnect entries in VPP // TestDumpXConnectPairs tests DumpXConnectPairs method func TestDumpXConnectPairs(t *testing.T) { - ctx, xcHandler := xcTestSetup(t) + ctx, xcHandler, ifIndex := xcTestSetup(t) defer ctx.TeardownTestCtx() + ifIndex.RegisterName("if1", 1, nil) + ifIndex.RegisterName("if2", 2, nil) + ifIndex.RegisterName("if3", 3, nil) + ifIndex.RegisterName("if4", 4, nil) + ctx.MockVpp.MockReply(testDataInXConnect...) 
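	// The mock VPP replies are consumed in order: the detail messages queued
	// above satisfy the multipart dump request, and the ControlPingReply below
	// terminates it. DumpXConnectPairs then resolves each rx/tx sw_if_index to
	// a name through the ifIndex mapping registered above, which is exactly
	// what the expected XConnectDetails/XcMeta pairs encode.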
ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) diff --git a/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go index 8a8cd4a885..8d82635e52 100644 --- a/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/interface_vppcalls_test.go @@ -26,7 +26,7 @@ import ( ) func TestSetInterfacesToBridgeDomain(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -75,7 +75,7 @@ func TestSetInterfacesToBridgeDomain(t *testing.T) { } func TestSetInterfacesToBridgeDomainNoInterfaceToSet(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) @@ -89,7 +89,7 @@ func TestSetInterfacesToBridgeDomainNoInterfaceToSet(t *testing.T) { } func TestSetInterfacesToBridgeDomainMissingInterface(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -114,7 +114,7 @@ func TestSetInterfacesToBridgeDomainMissingInterface(t *testing.T) { } func TestSetInterfacesToBridgeDomainError(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2Bridge{}) @@ -133,7 +133,7 @@ func TestSetInterfacesToBridgeDomainError(t *testing.T) { } func TestSetInterfacesToBridgeDomainRetval(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{ @@ -154,7 +154,7 @@ func TestSetInterfacesToBridgeDomainRetval(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomain(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -195,7 +195,7 @@ func TestUnsetInterfacesFromBridgeDomain(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomainNoInterfaceToUnset(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bd", nil)) @@ -209,7 +209,7 @@ func TestUnsetInterfacesFromBridgeDomainNoInterfaceToUnset(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomainMissingInterface(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{}) @@ -234,7 +234,7 @@ func TestUnsetInterfacesFromBridgeDomainMissingInterface(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomainError(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2Bridge{}) @@ -253,7 +253,7 @@ func TestUnsetInterfacesFromBridgeDomainError(t *testing.T) { } func TestUnsetInterfacesFromBridgeDomainRetval(t *testing.T) { - ctx, bdHandler := bdTestSetup(t) + ctx, bdHandler, _ := bdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&l2Api.SwInterfaceSetL2BridgeReply{ diff --git a/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go 
b/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go index 3df508ce52..526ff31dd9 100644 --- a/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/l2fib_vppcalls_test.go @@ -22,7 +22,10 @@ import ( govppcore "git.fd.io/govpp.git/core" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/vpp-agent/idxvpp/nametoidx" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx" "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" @@ -54,7 +57,7 @@ var deleteTestDataOutFib = &l2ba.L2fibAddDel{ } func TestL2FibAdd(t *testing.T) { - ctx, fibHandler := fibTestSetup(t) + ctx, fibHandler, _, _ := fibTestSetup(t) defer ctx.TeardownTestCtx() go fibHandler.WatchFIBReplies() @@ -74,7 +77,7 @@ func TestL2FibAdd(t *testing.T) { } func TestL2FibAddError(t *testing.T) { - ctx, fibHandler := fibTestSetup(t) + ctx, fibHandler, _, _ := fibTestSetup(t) defer ctx.TeardownTestCtx() go fibHandler.WatchFIBReplies() @@ -100,7 +103,7 @@ func TestL2FibAddError(t *testing.T) { } func TestL2FibDelete(t *testing.T) { - ctx, fibHandler := fibTestSetup(t) + ctx, fibHandler, _, _ := fibTestSetup(t) defer ctx.TeardownTestCtx() go fibHandler.WatchFIBReplies() @@ -119,7 +122,7 @@ func TestL2FibDelete(t *testing.T) { } func TestWatchFIBReplies(t *testing.T) { - ctx, fibHandler := fibTestSetup(t) + ctx, fibHandler, _, _ := fibTestSetup(t) defer ctx.TeardownTestCtx() go fibHandler.WatchFIBReplies() @@ -142,7 +145,7 @@ func TestWatchFIBReplies(t *testing.T) { } func benchmarkWatchFIBReplies(reqN int, b *testing.B) { - ctx, fibHandler := fibTestSetup(nil) + ctx, fibHandler, _, _ := fibTestSetup(nil) defer ctx.TeardownTestCtx() // debug logs slow down benchmarks @@ -187,11 +190,13 @@ func BenchmarkWatchFIBReplies10(b *testing.B) { benchmarkWatchFIBReplies(10, b func BenchmarkWatchFIBReplies100(b *testing.B) { benchmarkWatchFIBReplies(100, b) } func BenchmarkWatchFIBReplies1000(b *testing.B) { benchmarkWatchFIBReplies(1000, b) } -func fibTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.FibVppAPI) { +func fibTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.FibVppAPI, ifaceidx.SwIfIndexRW, l2idx.BDIndexRW) { ctx := vppcallmock.SetupTestCtx(t) logger := logrus.NewLogger("test-log") requestChan := make(chan *vppcalls.FibLogicalReq) - fibHandler, err := vppcalls.NewFibVppHandler(ctx.MockChannel, ctx.MockChannel, requestChan, logger, nil) + ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logger, "fib-if-idx", nil)) + bdIndexes := l2idx.NewBDIndex(nametoidx.NewNameToIdx(logger, "fib-bd-idx", nil)) + fibHandler, err := vppcalls.NewFibVppHandler(ctx.MockChannel, ctx.MockChannel, requestChan, ifIndexes, bdIndexes, logger, nil) Expect(err).To(BeNil()) - return ctx, fibHandler + return ctx, fibHandler, ifIndexes, bdIndexes } diff --git a/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go index 691a11de6d..3733ae7cf3 100644 --- a/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/xconnect_vppcalls_test.go @@ -19,7 +19,9 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/vpp-agent/idxvpp/nametoidx" l2ba "github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" 
"github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" @@ -53,7 +55,7 @@ scenarios: */ // TestVppSetL2XConnect tests VppSetL2XConnect method func TestVppSetL2XConnect(t *testing.T) { - ctx, xcHandler := xcTestSetup(t) + ctx, xcHandler, _ := xcTestSetup(t) defer ctx.TeardownTestCtx() for i := 0; i < len(setTestDataInXConnect); i++ { @@ -98,7 +100,7 @@ scenarios: */ // TestVppUnsetL2XConnect tests VppUnsetL2XConnect method func TestVppUnsetL2XConnect(t *testing.T) { - ctx, xcHandler := xcTestSetup(t) + ctx, xcHandler, _ := xcTestSetup(t) defer ctx.TeardownTestCtx() for i := 0; i < len(unsetTestDataInXConnect); i++ { @@ -115,10 +117,11 @@ func TestVppUnsetL2XConnect(t *testing.T) { } } -func xcTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.XConnectVppAPI) { +func xcTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.XConnectVppAPI, ifaceidx.SwIfIndexRW) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - xcHandler, err := vppcalls.NewXConnectVppHandler(ctx.MockChannel, log, nil) + ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "xc-if-idx", nil)) + xcHandler, err := vppcalls.NewXConnectVppHandler(ctx.MockChannel, ifIndexes, log, nil) Expect(err).To(BeNil()) - return ctx, xcHandler + return ctx, xcHandler, ifIndexes } diff --git a/plugins/vpp/l2plugin/xconnect_config.go b/plugins/vpp/l2plugin/xconnect_config.go index f46150bccf..803ca3d924 100644 --- a/plugins/vpp/l2plugin/xconnect_config.go +++ b/plugins/vpp/l2plugin/xconnect_config.go @@ -71,7 +71,7 @@ func (plugin *XConnectConfigurator) Init(logger logging.PluginLogger, goVppMux g } // Cross-connect VPP API handler - if plugin.xcHandler, err = vppcalls.NewXConnectVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + if plugin.xcHandler, err = vppcalls.NewXConnectVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.log, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/l3plugin/route_config.go b/plugins/vpp/l3plugin/route_config.go index a4bb3c7ef3..1d818dca4f 100644 --- a/plugins/vpp/l3plugin/route_config.go +++ b/plugins/vpp/l3plugin/route_config.go @@ -28,10 +28,10 @@ import ( "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/govppmux" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/l3idx" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ) // RouteConfigurator runs in the background in its own goroutine where it watches for any changes From ae917bdced0708939294dd654efd9cd1445f0e52 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Wed, 25 Jul 2018 13:04:28 +0200 Subject: [PATCH 049/174] add vrf to interface dump Signed-off-by: Vladimir Lavor --- .../ifplugin/vppcalls/dump_interface_vppcalls.go | 13 +++++++++++++ plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go index c8f766f505..1d60ea65e6 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go @@ -43,6 +43,7 @@ type InterfaceDetails struct { // InterfaceMeta is 
combination of proto-modelled Interface data and VPP provided metadata
 type InterfaceMeta struct {
+	SwIfIndex    uint32 `json:"sw_if_index"`
 	Tag          string `json:"tag"`
 	InternalName string `json:"internal_name"`
 }
@@ -80,6 +81,7 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*InterfaceDetails, err
 				}(ifDetails.LinkMtu),
 			},
 			Meta: &InterfaceMeta{
+				SwIfIndex:    ifDetails.SwIfIndex,
 				Tag:          string(bytes.SplitN(ifDetails.Tag, []byte{0x00}, 2)[0]),
 				InternalName: string(bytes.SplitN(ifDetails.InterfaceName, []byte{0x00}, 2)[0]),
 			},
@@ -95,6 +97,17 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*InterfaceDetails, err
 		}
 	}
 
+	// Get vrf for every interface
+	for _, ifData := range ifs {
+		vrf, err := handler.GetInterfaceVRF(ifData.Meta.SwIfIndex)
+		if err != nil {
+			handler.log.Warnf("Interface dump: failed to get VRF from interface %d", ifData.Meta.SwIfIndex)
+			continue
+		}
+		ifData.Interface.Vrf = vrf
+	}
+
+	handler.log.Debugf("dumped %d interfaces", len(ifs))
 
 	// SwInterfaceDump time
diff --git a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go
index fa2ad1c72e..46f4b1ad10 100644
--- a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go
+++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go
@@ -78,7 +78,7 @@ func (handler *bridgeDomainVppHandler) DumpBridgeDomains() (map[uint32]*BridgeDo
 	for _, iface := range bdDetails.SwIfDetails {
 		ifName, _, exists := handler.ifIndexes.LookupName(iface.SwIfIndex)
 		if !exists {
-			handler.log.Warnf("Bridge domain dump: interface name for index %s not found", iface.SwIfIndex)
+			handler.log.Warnf("Bridge domain dump: interface name for index %d not found", iface.SwIfIndex)
 			continue
 		}
 		// Bvi

From d6c86a13ad074a6c6302de049bc18dc9674aabb3 Mon Sep 17 00:00:00 2001
From: Vladimir Lavor
Date: Wed, 25 Jul 2018 14:17:28 +0200
Subject: [PATCH 050/174] fix failing test

Signed-off-by: Vladimir Lavor
---
 plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go
index 08e307af00..0b32a22738 100644
--- a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go
+++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go
@@ -63,6 +63,7 @@ var testDataOutMessage = []*vppcalls.BridgeDomainDetails{
 			},
 		},
 		Meta: &vppcalls.BridgeDomainMeta{
+			BdID: 4,
 			BdIfIdxToName: func() map[uint32]string {
 				meta := make(map[uint32]string)
 				meta[5] = "if1"
@@ -88,6 +89,7 @@ var testDataOutMessage = []*vppcalls.BridgeDomainDetails{
 			},
 		},
 		Meta: &vppcalls.BridgeDomainMeta{
+			BdID: 5,
 			BdIfIdxToName: func() map[uint32]string {
 				meta := make(map[uint32]string)
 				meta[5] = "if1"

From 9ee5b9f12b241c61f2e54299d16425f90caf9b04 Mon Sep 17 00:00:00 2001
From: Vladimir Lavor
Date: Thu, 26 Jul 2018 07:37:45 +0200
Subject: [PATCH 051/174] changed bfd dump + added bfd to rest

Signed-off-by: Vladimir Lavor
---
 cmd/vpp-agent-ctl/data_cmd.go                 |   2 +-
 plugins/rest/plugin_impl_rest.go              |  52 +++--
 plugins/rest/rest_handlers.go                 |  24 +++
 plugins/vpp/ifplugin/bfd_config.go            | 123 ++++------
 plugins/vpp/ifplugin/bfd_config_test.go       |  52 -----
 plugins/vpp/ifplugin/data_resync.go           |  14 +-
 plugins/vpp/ifplugin/vppcalls/api_vppcalls.go |  12 +-
 plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go |  68 +-----
 .../ifplugin/vppcalls/bfd_vppcalls_test.go    | 170 +++++----------
 .../ifplugin/vppcalls/dump_bfd_vppcalls.go    | 193 ++++++++++++++++++
 .../vppcalls/dump_bfd_vppcalls_test.go        | 103 ++++++++++
 plugins/vpp/model/bfd/keys_agent.go           |  61 ++++--
 12 files
changed, 488 insertions(+), 386 deletions(-) create mode 100644 plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go create mode 100644 plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls_test.go diff --git a/cmd/vpp-agent-ctl/data_cmd.go b/cmd/vpp-agent-ctl/data_cmd.go index 4b0957f1a5..200514fdd8 100644 --- a/cmd/vpp-agent-ctl/data_cmd.go +++ b/cmd/vpp-agent-ctl/data_cmd.go @@ -190,7 +190,7 @@ func (ctl *VppAgentCtl) createBfdSession() { { Interface: "memif1", Enabled: true, - SourceAddress: "192.168.1.2", + SourceAddress: "172.125.40.1", DestinationAddress: "20.10.0.5", RequiredMinRxInterval: 8, DesiredMinTxInterval: 3, diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index ca54c18174..ea7a5da0bb 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -20,12 +20,11 @@ import ( "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/flavors/local" "github.com/ligato/cn-infra/rpc/rest" + "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/plugins/govppmux" "github.com/ligato/vpp-agent/plugins/vpp" aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx" l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" @@ -51,13 +50,10 @@ type Plugin struct { vppChan api.Channel dumpChan api.Channel - // Indexes - ifIndexes ifaceidx.SwIfIndex - bdIndexes l2idx.BDIndex - // Handlers aclHandler aclvppcalls.AclVppRead ifHandler ifvppcalls.IfVppRead + bfdHandler ifvppcalls.BfdVppRead bdHandler l2vppcalls.BridgeDomainVppRead fibHandler l2vppcalls.FibVppRead xcHandler l2vppcalls.XConnectVppRead @@ -78,6 +74,10 @@ type indexItem struct { // Init initializes the Rest Plugin func (plugin *Plugin) Init() (err error) { + // Check VPP dependency + if plugin.VPP == nil { + return fmt.Errorf("REST plugin requires VPP plugin API") + } // VPP channels if plugin.vppChan, err = plugin.GoVppmux.NewAPIChannel(); err != nil { return err @@ -86,10 +86,8 @@ func (plugin *Plugin) Init() (err error) { return err } // Indexes - if plugin.VPP != nil { - plugin.ifIndexes = plugin.VPP.GetSwIfIndexes() - plugin.bdIndexes = plugin.VPP.GetBDIndexes() - } + ifIndexes := plugin.VPP.GetSwIfIndexes() + bdIndexes := plugin.VPP.GetBDIndexes() // Initialize handlers if plugin.aclHandler, err = aclvppcalls.NewAclVppHandler(plugin.vppChan, plugin.dumpChan, nil); err != nil { @@ -98,21 +96,18 @@ func (plugin *Plugin) Init() (err error) { if plugin.ifHandler, err = ifvppcalls.NewIfVppHandler(plugin.vppChan, plugin.Log, nil); err != nil { return err } - if plugin.ifIndexes != nil { - if plugin.bdHandler, err = l2vppcalls.NewBridgeDomainVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.Log, nil); err != nil { - return err - } + if plugin.bfdHandler, err = ifvppcalls.NewBfdVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != nil { + return err } - if plugin.ifIndexes != nil && plugin.bdIndexes != nil { - if plugin.fibHandler, err = l2vppcalls.NewFibVppHandler(plugin.vppChan, plugin.dumpChan, make(chan *l2vppcalls.FibLogicalReq), - plugin.ifIndexes, plugin.bdIndexes, plugin.Log, nil); err != nil { - return err - } + if plugin.bdHandler, err = l2vppcalls.NewBridgeDomainVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != 
nil { + return err } - if plugin.ifIndexes != nil { - if plugin.xcHandler, err = l2vppcalls.NewXConnectVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.Log, nil); err != nil { - return err - } + if plugin.fibHandler, err = l2vppcalls.NewFibVppHandler(plugin.vppChan, plugin.dumpChan, make(chan *l2vppcalls.FibLogicalReq), + ifIndexes, bdIndexes, plugin.Log, nil); err != nil { + return err + } + if plugin.xcHandler, err = l2vppcalls.NewXConnectVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != nil { + return err } plugin.indexItems = []indexItem{ @@ -148,10 +143,11 @@ func (plugin *Plugin) AfterInit() (err error) { if err := plugin.registerInterfaceHandlers(); err != nil { return err } - if plugin.bdHandler != nil { - if err := plugin.registerL2Handlers(); err != nil { - return err - } + if err := plugin.registerBfdHandlers(); err != nil { + return err + } + if err := plugin.registerL2Handlers(); err != nil { + return err } plugin.HTTPHandlers.RegisterHTTPHandler("/arps", plugin.arpGetHandler, "GET") @@ -170,5 +166,5 @@ func (plugin *Plugin) AfterInit() (err error) { // Close is used to clean up resources used by Plugin func (plugin *Plugin) Close() (err error) { - return nil + return safeclose.Close(plugin.vppChan, plugin.dumpChan) } diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index b1b769d2b1..b8688c0247 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -32,6 +32,7 @@ import ( aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" + "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) @@ -142,6 +143,29 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return nil } +func (plugin *Plugin) registerBfdHandlers() error { + // GET BFD configuration + if err := plugin.registerHTTPHandler(bfd.RestBfdKey(), GET, func() (interface{}, error) { + return plugin.bfdHandler.DumpBfdSingleHop() + }); err != nil { + return err + } + // GET BFD sessions + if err := plugin.registerHTTPHandler(bfd.RestSessionKey(), GET, func() (interface{}, error) { + return plugin.bfdHandler.DumpBfdSessions() + }); err != nil { + return err + } + // GET BFD authentication keys + if err := plugin.registerHTTPHandler(bfd.RestAuthKeysKey(), GET, func() (interface{}, error) { + return plugin.bfdHandler.DumpBfdAuthKeys() + }); err != nil { + return err + } + + return nil +} + // Registers L2 plugin REST handlers func (plugin *Plugin) registerL2Handlers() error { // GET bridge domain IDs diff --git a/plugins/vpp/ifplugin/bfd_config.go b/plugins/vpp/ifplugin/bfd_config.go index f4c7739d1f..812f067d7d 100644 --- a/plugins/vpp/ifplugin/bfd_config.go +++ b/plugins/vpp/ifplugin/bfd_config.go @@ -81,7 +81,7 @@ func (plugin *BFDConfigurator) Init(logger logging.PluginLogger, goVppMux govppm } // VPP API handler - if plugin.bfdHandler, err = vppcalls.NewBfdVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + if plugin.bfdHandler, err = vppcalls.NewBfdVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.log, plugin.stopwatch); err != nil { return err } @@ -244,45 +244,6 @@ func (plugin *BFDConfigurator) DeleteBfdSession(bfdInput *bfd.SingleHopBFD_Sessi return nil } -// DumpBfdSessions returns a list of all configured BFD sessions -func (plugin *BFDConfigurator) DumpBfdSessions() 
([]*bfd.SingleHopBFD_Session, error) { - var bfdSessionList []*bfd.SingleHopBFD_Session - - bfdList, err := plugin.bfdHandler.DumpBfdUDPSessions() - if err != nil { - return bfdSessionList, err - } - - var wasError error - for _, bfdItem := range bfdList { - // find interface - ifName, _, found := plugin.ifIndexes.LookupName(bfdItem.SwIfIndex) - if !found { - plugin.log.Warnf("required interface %v not found for BFD", bfdItem.SwIfIndex) - } - - // Prepare IPv4 IP addresses - var dstAddr net.IP = bfdItem.PeerAddr[:4] - var srcAddr net.IP = bfdItem.LocalAddr[:4] - - bfdSessionList = append(bfdSessionList, &bfd.SingleHopBFD_Session{ - Interface: ifName, - DestinationAddress: dstAddr.To4().String(), - SourceAddress: srcAddr.To4().String(), - Enabled: true, - DesiredMinTxInterval: bfdItem.DesiredMinTx, - RequiredMinRxInterval: bfdItem.RequiredMinRx, - DetectMultiplier: uint32(bfdItem.DetectMult), - Authentication: &bfd.SingleHopBFD_Session_Authentication{ - KeyId: uint32(bfdItem.BfdKeyID), - AdvertisedKeyId: uint32(bfdItem.BfdKeyID), - }, - }) - } - - return bfdSessionList, wasError -} - // ConfigureBfdAuthKey crates new authentication key which can be used for BFD session func (plugin *BFDConfigurator) ConfigureBfdAuthKey(bfdAuthKey *bfd.SingleHopBFD_Key) error { plugin.log.Infof("Configuring BFD authentication key with ID %v", bfdAuthKey.Id) @@ -311,17 +272,21 @@ func (plugin *BFDConfigurator) ModifyBfdAuthKey(oldInput *bfd.SingleHopBFD_Key, if err != nil { return fmt.Errorf("error while verifying authentication key usage. Id: %d: %v", oldInput.Id, err) } - if len(sessionList) != 0 { + if sessionList != nil && len(sessionList.Session) != 0 { // Authentication Key is used and cannot be removed directly - for _, bfds := range sessionList { - sourceAddr := net.HardwareAddr(bfds.LocalAddr).String() - destAddr := net.HardwareAddr(bfds.PeerAddr).String() - err := plugin.bfdHandler.DeleteBfdUDPSession(bfds.SwIfIndex, sourceAddr, destAddr) + for _, bfds := range sessionList.Session { + sourceAddr := net.HardwareAddr(bfds.SourceAddress).String() + destAddr := net.HardwareAddr(bfds.DestinationAddress).String() + ifIdx, _, found := plugin.ifIndexes.LookupIdx(bfds.Interface) + if !found { + plugin.log.Warnf("Modify BFD auth key: interface index for %s not found", bfds.Interface) + } + err := plugin.bfdHandler.DeleteBfdUDPSession(ifIdx, sourceAddr, destAddr) if err != nil { return err } } - plugin.log.Debugf("%v session(s) temporary removed", len(sessionList)) + plugin.log.Debugf("%v session(s) temporary removed", len(sessionList.Session)) } err = plugin.bfdHandler.DeleteBfdUDPAuthenticationKey(oldInput) @@ -334,14 +299,18 @@ func (plugin *BFDConfigurator) ModifyBfdAuthKey(oldInput *bfd.SingleHopBFD_Key, } // Recreate BFD sessions if necessary - if len(sessionList) != 0 { - for _, bfdSession := range sessionList { - err := plugin.bfdHandler.AddBfdUDPSessionFromDetails(bfdSession, plugin.keysIndexes) + if sessionList != nil && len(sessionList.Session) != 0 { + for _, bfdSession := range sessionList.Session { + ifIdx, _, found := plugin.ifIndexes.LookupIdx(bfdSession.Interface) + if !found { + plugin.log.Warnf("Modify BFD auth key: interface index for %s not found", bfdSession.Interface) + } + err := plugin.bfdHandler.AddBfdUDPSession(bfdSession, ifIdx, plugin.keysIndexes) if err != nil { return err } } - plugin.log.Debugf("%v session(s) recreated", len(sessionList)) + plugin.log.Debugf("%v session(s) recreated", len(sessionList.Session)) } return nil @@ -357,17 +326,19 @@ func (plugin *BFDConfigurator) 
DeleteBfdAuthKey(bfdInput *bfd.SingleHopBFD_Key) return fmt.Errorf("error while verifying authentication key usage. Id: %v", bfdInput.Id) } - if len(sessionList) != 0 { + if sessionList != nil && len(sessionList.Session) != 0 { // Authentication Key is used and cannot be removed directly - for _, bfds := range sessionList { - sourceAddr := net.IP(bfds.LocalAddr[0:4]).String() - destAddr := net.IP(bfds.PeerAddr[0:4]).String() - err := plugin.bfdHandler.DeleteBfdUDPSession(bfds.SwIfIndex, sourceAddr, destAddr) + for _, bfds := range sessionList.Session { + ifIdx, _, found := plugin.ifIndexes.LookupIdx(bfds.Interface) + if !found { + plugin.log.Warnf("Delete BFD auth key: interface index for %s not found", bfds.Interface) + } + err := plugin.bfdHandler.DeleteBfdUDPSession(ifIdx, bfds.SourceAddress, bfds.DestinationAddress) if err != nil { return err } } - plugin.log.Debugf("%v session(s) temporary removed", len(sessionList)) + plugin.log.Debugf("%v session(s) temporary removed", len(sessionList.Session)) } err = plugin.bfdHandler.DeleteBfdUDPAuthenticationKey(bfdInput) if err != nil { @@ -377,46 +348,22 @@ func (plugin *BFDConfigurator) DeleteBfdAuthKey(bfdInput *bfd.SingleHopBFD_Key) plugin.keysIndexes.UnregisterName(authKeyIDAsString) plugin.log.Debugf("BFD authentication key with id %v unregistered", bfdInput.Id) // Recreate BFD sessions if necessary - if len(sessionList) != 0 { - for _, bfdSession := range sessionList { - err := plugin.bfdHandler.AddBfdUDPSessionFromDetails(bfdSession, plugin.keysIndexes) + if sessionList != nil && len(sessionList.Session) != 0 { + for _, bfdSession := range sessionList.Session { + ifIdx, _, found := plugin.ifIndexes.LookupIdx(bfdSession.Interface) + if !found { + plugin.log.Warnf("Delete BFD auth key: interface index for %s not found", bfdSession.Interface) + } + err := plugin.bfdHandler.AddBfdUDPSession(bfdSession, ifIdx, plugin.keysIndexes) if err != nil { return err } } - plugin.log.Debugf("%v session(s) recreated", len(sessionList)) + plugin.log.Debugf("%v session(s) recreated", len(sessionList.Session)) } return nil } -// DumpBFDAuthKeys returns a list of all configured authentication keys -func (plugin *BFDConfigurator) DumpBFDAuthKeys() ([]*bfd.SingleHopBFD_Key, error) { - var bfdAuthKeyList []*bfd.SingleHopBFD_Key - - keys, err := plugin.bfdHandler.DumpBfdKeys() - if err != nil { - return bfdAuthKeyList, err - } - - for _, key := range keys { - // resolve authentication type - var authType bfd.SingleHopBFD_Key_AuthenticationType - if key.AuthType == 4 { - authType = bfd.SingleHopBFD_Key_KEYED_SHA1 - } else { - authType = bfd.SingleHopBFD_Key_METICULOUS_KEYED_SHA1 - } - - bfdAuthKeyList = append(bfdAuthKeyList, &bfd.SingleHopBFD_Key{ - Id: key.ConfKeyID, - AuthKeyIndex: key.ConfKeyID, - AuthenticationType: authType, - }) - } - - return bfdAuthKeyList, nil -} - // ConfigureBfdEchoFunction is used to setup BFD Echo function on existing interface func (plugin *BFDConfigurator) ConfigureBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction) error { plugin.log.Infof("Configuring BFD echo function for source interface %v", bfdInput.EchoSourceInterface) diff --git a/plugins/vpp/ifplugin/bfd_config_test.go b/plugins/vpp/ifplugin/bfd_config_test.go index b0280ffcfc..57515fd75f 100644 --- a/plugins/vpp/ifplugin/bfd_config_test.go +++ b/plugins/vpp/ifplugin/bfd_config_test.go @@ -308,33 +308,6 @@ func TestBfdConfiguratorDeleteSessionError(t *testing.T) { Expect(err).ToNot(BeNil()) } -// BFD session dump -func TestBfdConfiguratorDumpBfdSessions(t 
*testing.T) { - var err error - // Setup - ctx, connection, plugin, ifIndexes := bfdTestSetup(t) - defer bfdTestTeardown(connection, plugin) - // Reply set - ctx.MockVpp.MockReply( - &bfd_api.BfdUDPSessionDetails{ - SwIfIndex: 1, - LocalAddr: net.ParseIP("10.0.0.1").To4(), - PeerAddr: net.ParseIP("10.0.0.2").To4(), - }, - &bfd_api.BfdUDPSessionDetails{ - SwIfIndex: 2, - LocalAddr: net.ParseIP("10.0.0.3").To4(), - PeerAddr: net.ParseIP("10.0.0.4").To4(), - }) - ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - // Register (only first interface) - ifIndexes.RegisterName("if1", 1, nil) - // Test bfd session dump - sessions, err := plugin.DumpBfdSessions() - Expect(err).To(BeNil()) - Expect(sessions).To(HaveLen(2)) -} - // Configure BFD authentication key func TestBfdConfiguratorSetAuthKey(t *testing.T) { var err error @@ -450,31 +423,6 @@ func TestBfdConfiguratorDeleteUsedAuthKey(t *testing.T) { Expect(err).To(BeNil()) } -// Dump BFD authentication key -func TestBfdConfiguratorDumpAuthKey(t *testing.T) { - var err error - // Setup - ctx, connection, plugin, _ := bfdTestSetup(t) - defer bfdTestTeardown(connection, plugin) - // Reply set - ctx.MockVpp.MockReply( - &bfd_api.BfdAuthKeysDetails{ - ConfKeyID: 1, - AuthType: 4, // Means KEYED SHA1 - }, - &bfd_api.BfdAuthKeysDetails{ - ConfKeyID: 2, - AuthType: 1, // Any other number is METICULOUS KEYED SHA1 - }) - ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - // Test authentication key dump - keys, err := plugin.DumpBFDAuthKeys() - Expect(err).To(BeNil()) - Expect(keys).To(HaveLen(2)) - Expect(keys[0].AuthenticationType).To(BeEquivalentTo(bfd.SingleHopBFD_Key_KEYED_SHA1)) - Expect(keys[1].AuthenticationType).To(BeEquivalentTo(bfd.SingleHopBFD_Key_METICULOUS_KEYED_SHA1)) -} - // Configure BFD echo function create/modify/delete func TestBfdConfiguratorEchoFunction(t *testing.T) { var err error diff --git a/plugins/vpp/ifplugin/data_resync.go b/plugins/vpp/ifplugin/data_resync.go index 826634b947..9d0345e857 100644 --- a/plugins/vpp/ifplugin/data_resync.go +++ b/plugins/vpp/ifplugin/data_resync.go @@ -232,8 +232,8 @@ func (plugin *BFDConfigurator) ResyncSession(nbSessions []*bfd.SingleHopBFD_Sess plugin.clearMapping() // Dump all BFD vppSessions - vppSessions, err := plugin.DumpBfdSessions() - if err != nil { + vppBfdSessions, err := plugin.bfdHandler.DumpBfdSessions() + if err != nil || vppBfdSessions == nil { return err } @@ -242,7 +242,7 @@ func (plugin *BFDConfigurator) ResyncSession(nbSessions []*bfd.SingleHopBFD_Sess for _, nbSession := range nbSessions { // look for configured session var found bool - for _, vppSession := range vppSessions { + for _, vppSession := range vppBfdSessions.Session { // compare fixed fields if nbSession.Interface == vppSession.Interface && nbSession.SourceAddress == vppSession.SourceAddress && nbSession.DestinationAddress == vppSession.DestinationAddress { @@ -265,7 +265,7 @@ func (plugin *BFDConfigurator) ResyncSession(nbSessions []*bfd.SingleHopBFD_Sess } // Remove old sessions - for _, vppSession := range vppSessions { + for _, vppSession := range vppBfdSessions.Session { // remove every not-yet-registered session _, _, found := plugin.sessionsIndexes.LookupIdx(vppSession.Interface) if !found { @@ -292,7 +292,7 @@ func (plugin *BFDConfigurator) ResyncAuthKey(nbKeys []*bfd.SingleHopBFD_Key) err }() // lookup BFD auth keys - vppKeys, err := plugin.DumpBFDAuthKeys() + vppBfdKeys, err := plugin.bfdHandler.DumpBfdAuthKeys() if err != nil { return err } @@ -302,7 +302,7 @@ func (plugin *BFDConfigurator) 
ResyncAuthKey(nbKeys []*bfd.SingleHopBFD_Key) err for _, nbKey := range nbKeys { // look for configured keys var found bool - for _, vppKey := range vppKeys { + for _, vppKey := range vppBfdKeys.AuthKeys { // compare key ID if nbKey.Id == vppKey.Id { plugin.log.Debugf("found configured BFD auth key with ID %v", nbKey.Id) @@ -324,7 +324,7 @@ func (plugin *BFDConfigurator) ResyncAuthKey(nbKeys []*bfd.SingleHopBFD_Key) err } // Remove old keys - for _, vppKey := range vppKeys { + for _, vppKey := range vppBfdKeys.AuthKeys { // remove every not-yet-registered keys _, _, found := plugin.keysIndexes.LookupIdx(AuthKeyIdentifier(vppKey.Id)) if !found { diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go index 8f13928ee1..5988bf6b51 100644 --- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -140,12 +140,14 @@ type BfdVppWrite interface { // BfdVppRead provides read methods for BFD type BfdVppRead interface { + // DumpBfdSingleHop returns complete BFD configuration + DumpBfdSingleHop() (*BfdDetails, error) // DumpBfdUDPSessions returns a list of BFD session's metadata - DumpBfdUDPSessions() ([]*bfd_api.BfdUDPSessionDetails, error) + DumpBfdSessions() (*BfdSessionDetails, error) // DumpBfdUDPSessionsWithID returns a list of BFD session's metadata filtered according to provided authentication key - DumpBfdUDPSessionsWithID(authKeyIndex uint32) ([]*bfd_api.BfdUDPSessionDetails, error) + DumpBfdUDPSessionsWithID(authKeyIndex uint32) (*BfdSessionDetails, error) // DumpBfdKeys looks up all BFD auth keys and saves their name-to-index mapping - DumpBfdKeys() (keys []*bfd_api.BfdAuthKeysDetails, err error) + DumpBfdAuthKeys() (*BfdAuthKeyDetails, error) } // NatVppAPI provides methods for managing NAT @@ -226,6 +228,7 @@ type ifVppHandler struct { type bfdVppHandler struct { stopwatch *measure.Stopwatch callsChannel api.Channel + ifIndexes ifaceidx.SwIfIndex log logging.Logger } @@ -258,10 +261,11 @@ func NewIfVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measu } // NewBfdVppHandler creates new instance of BFD vppcalls handler -func NewBfdVppHandler(callsChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*bfdVppHandler, error) { +func NewBfdVppHandler(callsChan api.Channel, ifIndexes ifaceidx.SwIfIndex, log logging.Logger, stopwatch *measure.Stopwatch) (*bfdVppHandler, error) { handler := &bfdVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, + ifIndexes: ifIndexes, log: log, } if err := handler.callsChannel.CheckMessageCompatibility(BfdMessages...); err != nil { diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go index fc6cbb6b02..0e42aee901 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go @@ -19,6 +19,8 @@ import ( "net" "time" + "strconv" + "github.com/ligato/cn-infra/utils/addrs" "github.com/ligato/vpp-agent/idxvpp" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" @@ -61,7 +63,7 @@ func (handler *bfdVppHandler) AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session // Authentication if bfdSess.Authentication != nil { - keyID := string(bfdSess.Authentication.KeyId) + keyID := strconv.FormatUint(uint64(bfdSess.Authentication.KeyId), 10) handler.log.Infof("Setting up authentication with index %v", keyID) _, _, found := bfdKeyIndexes.LookupIdx(keyID) if found { @@ -203,47 +205,6 @@ func (handler *bfdVppHandler) 
DeleteBfdUDPSession(ifIndex uint32, sourceAddress return nil } -func (handler *bfdVppHandler) DumpBfdUDPSessions() ([]*bfd_api.BfdUDPSessionDetails, error) { - return handler.dumpBfdUDPSessionsWithID(false, 0) -} - -func (handler *bfdVppHandler) DumpBfdUDPSessionsWithID(authKeyIndex uint32) ([]*bfd_api.BfdUDPSessionDetails, error) { - return handler.dumpBfdUDPSessionsWithID(true, authKeyIndex) -} - -func (handler *bfdVppHandler) dumpBfdUDPSessionsWithID(filterID bool, authKeyIndex uint32) (sessions []*bfd_api.BfdUDPSessionDetails, err error) { - defer func(t time.Time) { - handler.stopwatch.TimeLog(bfd_api.BfdUDPSessionDump{}).LogTimeEntry(time.Since(t)) - }(time.Now()) - - req := &bfd_api.BfdUDPSessionDump{} - reqCtx := handler.callsChannel.SendMultiRequest(req) - for { - msg := &bfd_api.BfdUDPSessionDetails{} - stop, err := reqCtx.ReceiveReply(msg) - if stop { - break - } - if err != nil { - return sessions, err - } - - if filterID { - // Not interested in sessions without auth key - if msg.IsAuthenticated == 0 { - continue - } - if msg.BfdKeyID == uint8(authKeyIndex) { - sessions = append(sessions, msg) - } - } else { - sessions = append(sessions, msg) - } - } - - return sessions, nil -} - func (handler *bfdVppHandler) SetBfdUDPAuthenticationKey(bfdKey *bfd.SingleHopBFD_Key) error { defer func(t time.Time) { handler.stopwatch.TimeLog(bfd_api.BfdAuthSetKey{}).LogTimeEntry(time.Since(t)) @@ -298,29 +259,6 @@ func (handler *bfdVppHandler) DeleteBfdUDPAuthenticationKey(bfdKey *bfd.SingleHo return nil } -func (handler *bfdVppHandler) DumpBfdKeys() (keys []*bfd_api.BfdAuthKeysDetails, err error) { - defer func(t time.Time) { - handler.stopwatch.TimeLog(bfd_api.BfdAuthKeysDump{}).LogTimeEntry(time.Since(t)) - }(time.Now()) - - req := &bfd_api.BfdAuthKeysDump{} - reqCtx := handler.callsChannel.SendMultiRequest(req) - for { - msg := &bfd_api.BfdAuthKeysDetails{} - stop, err := reqCtx.ReceiveReply(msg) - if stop { - break - } - if err != nil { - return nil, err - } - - keys = append(keys, msg) - } - - return keys, nil -} - func (handler *bfdVppHandler) AddBfdEchoFunction(bfdInput *bfd.SingleHopBFD_EchoFunction, swIfIndexes ifaceidx.SwIfIndex) error { defer func(t time.Time) { handler.stopwatch.TimeLog(bfd_api.BfdUDPSetEchoSource{}).LogTimeEntry(time.Since(t)) diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go index 9b891a9ae2..af732bf12a 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go @@ -21,20 +21,20 @@ import ( "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/idxvpp/nametoidx" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" "github.com/ligato/vpp-agent/tests/vppcallmock" . 
"github.com/onsi/gomega" + "strconv" ) func TestAddBfdUDPSession(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) - bfdKeyIndexes.RegisterName(string(1), 1, nil) + bfdKeyIndexes.RegisterName(strconv.FormatUint(uint64(1), 10), 1, nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) @@ -62,11 +62,11 @@ func TestAddBfdUDPSession(t *testing.T) { } func TestAddBfdUDPSessionIPv6(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) - bfdKeyIndexes.RegisterName(string(1), 1, nil) + bfdKeyIndexes.RegisterName(strconv.FormatUint(uint64(1), 10), 1, nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) @@ -94,7 +94,7 @@ func TestAddBfdUDPSessionIPv6(t *testing.T) { } func TestAddBfdUDPSessionAuthKeyNotFound(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -125,7 +125,7 @@ func TestAddBfdUDPSessionAuthKeyNotFound(t *testing.T) { } func TestAddBfdUDPSessionNoAuthKey(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) @@ -149,11 +149,11 @@ func TestAddBfdUDPSessionNoAuthKey(t *testing.T) { } func TestAddBfdUDPSessionIncorrectSrcIPError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) - bfdKeyIndexes.RegisterName(string(1), 1, nil) + bfdKeyIndexes.RegisterName(strconv.FormatUint(uint64(1), 10), 1, nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) @@ -173,7 +173,7 @@ func TestAddBfdUDPSessionIncorrectSrcIPError(t *testing.T) { } func TestAddBfdUDPSessionIncorrectDstIPError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -197,7 +197,7 @@ func TestAddBfdUDPSessionIncorrectDstIPError(t *testing.T) { } func TestAddBfdUDPSessionIPVerError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -221,7 +221,7 @@ func TestAddBfdUDPSessionIPVerError(t *testing.T) { } func TestAddBfdUDPSessionError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) @@ -237,7 +237,7 @@ func TestAddBfdUDPSessionError(t *testing.T) { } func TestAddBfdUDPSessionRetvalError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{ @@ -255,7 +255,7 @@ func TestAddBfdUDPSessionRetvalError(t *testing.T) { } func TestAddBfdUDPSessionFromDetails(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -287,7 +287,7 @@ func TestAddBfdUDPSessionFromDetails(t *testing.T) { } func 
TestAddBfdUDPSessionFromDetailsAuthKeyNotFound(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -313,7 +313,7 @@ func TestAddBfdUDPSessionFromDetailsAuthKeyNotFound(t *testing.T) { } func TestAddBfdUDPSessionFromDetailsNoAuth(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -339,7 +339,7 @@ func TestAddBfdUDPSessionFromDetailsNoAuth(t *testing.T) { } func TestAddBfdUDPSessionFromDetailsError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -357,7 +357,7 @@ func TestAddBfdUDPSessionFromDetailsError(t *testing.T) { } func TestAddBfdUDPSessionFromDetailsRetval(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) @@ -377,7 +377,7 @@ func TestAddBfdUDPSessionFromDetailsRetval(t *testing.T) { } func TestModifyBfdUDPSession(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -405,7 +405,7 @@ func TestModifyBfdUDPSession(t *testing.T) { } func TestModifyBfdUDPSessionIPv6(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -426,7 +426,7 @@ func TestModifyBfdUDPSessionIPv6(t *testing.T) { } func TestModifyBfdUDPSessionDifferentIPVer(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -444,7 +444,7 @@ func TestModifyBfdUDPSessionDifferentIPVer(t *testing.T) { } func TestModifyBfdUDPSessionNoInterface(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -461,7 +461,7 @@ func TestModifyBfdUDPSessionNoInterface(t *testing.T) { } func TestModifyBfdUDPSessionInvalidSrcIP(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -479,7 +479,7 @@ func TestModifyBfdUDPSessionInvalidSrcIP(t *testing.T) { } func TestModifyBfdUDPSessionInvalidDstIP(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -497,7 +497,7 @@ func TestModifyBfdUDPSessionInvalidDstIP(t *testing.T) { } func TestModifyBfdUDPSessionError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -515,7 +515,7 @@ func 
TestModifyBfdUDPSessionError(t *testing.T) { } func TestModifyBfdUDPSessionRetval(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -535,7 +535,7 @@ func TestModifyBfdUDPSessionRetval(t *testing.T) { } func TestDeleteBfdUDPSession(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelReply{}) @@ -552,7 +552,7 @@ func TestDeleteBfdUDPSession(t *testing.T) { } func TestDeleteBfdUDPSessionError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPModReply{}) @@ -563,7 +563,7 @@ func TestDeleteBfdUDPSessionError(t *testing.T) { } func TestDeleteBfdUDPSessionRetval(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelReply{ @@ -575,61 +575,8 @@ func TestDeleteBfdUDPSessionRetval(t *testing.T) { Expect(err).ToNot(BeNil()) } -func TestDumpBfdUDPSessions(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) - defer ctx.TeardownTestCtx() - - ctx.MockVpp.MockReply(&bfd_api.BfdUDPSessionDetails{ - SwIfIndex: 1, - LocalAddr: net.ParseIP("10.0.0.1"), - PeerAddr: net.ParseIP("20.0.0.1"), - }) - ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - - sessions, err := bfdHandler.DumpBfdUDPSessions() - - Expect(err).To(BeNil()) - Expect(sessions).To(HaveLen(1)) -} - -func TestDumpBfdUDPSessionsWithID(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) - defer ctx.TeardownTestCtx() - - // Authenticated wiht ID 1 - ctx.MockVpp.MockReply( - &bfd_api.BfdUDPSessionDetails{ - SwIfIndex: 1, - LocalAddr: net.ParseIP("10.0.0.1"), - PeerAddr: net.ParseIP("20.0.0.1"), - IsAuthenticated: 1, - BfdKeyID: 1, - }, - // Authenticated with ID 2 (filtered) - &bfd_api.BfdUDPSessionDetails{ - SwIfIndex: 2, - LocalAddr: net.ParseIP("10.0.0.2"), - PeerAddr: net.ParseIP("20.0.0.2"), - IsAuthenticated: 1, - BfdKeyID: 2, - }, - // Not authenticated - &bfd_api.BfdUDPSessionDetails{ - SwIfIndex: 3, - LocalAddr: net.ParseIP("10.0.0.3"), - PeerAddr: net.ParseIP("20.0.0.3"), - IsAuthenticated: 0, - }) - ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - - sessions, err := bfdHandler.DumpBfdUDPSessionsWithID(1) - - Expect(err).To(BeNil()) - Expect(sessions).To(HaveLen(1)) -} - func TestSetBfdUDPAuthenticationKeySha1(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) @@ -652,7 +599,7 @@ func TestSetBfdUDPAuthenticationKeySha1(t *testing.T) { } func TestSetBfdUDPAuthenticationKeyMeticulous(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) @@ -675,7 +622,7 @@ func TestSetBfdUDPAuthenticationKeyMeticulous(t *testing.T) { } func TestSetBfdUDPAuthenticationKeyUnknown(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) @@ -698,7 +645,7 @@ func TestSetBfdUDPAuthenticationKeyUnknown(t *testing.T) { } func TestSetBfdUDPAuthenticationError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := 
bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthDelKeyReply{}) @@ -715,7 +662,7 @@ func TestSetBfdUDPAuthenticationError(t *testing.T) { } func TestSetBfdUDPAuthenticationRetval(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{ @@ -734,7 +681,7 @@ func TestSetBfdUDPAuthenticationRetval(t *testing.T) { } func TestDeleteBfdUDPAuthenticationKey(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthDelKeyReply{}) @@ -754,7 +701,7 @@ func TestDeleteBfdUDPAuthenticationKey(t *testing.T) { } func TestDeleteBfdUDPAuthenticationKeyError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthSetKeyReply{}) @@ -771,7 +718,7 @@ func TestDeleteBfdUDPAuthenticationKeyError(t *testing.T) { } func TestDeleteBfdUDPAuthenticationKeyRetval(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdAuthDelKeyReply{ @@ -789,30 +736,8 @@ func TestDeleteBfdUDPAuthenticationKeyRetval(t *testing.T) { Expect(err).ToNot(BeNil()) } -func TestDumpBfdKeys(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) - defer ctx.TeardownTestCtx() - - ctx.MockVpp.MockReply(&bfd_api.BfdAuthKeysDetails{ - ConfKeyID: 1, - UseCount: 0, - AuthType: 4, - }, - &bfd_api.BfdAuthKeysDetails{ - ConfKeyID: 2, - UseCount: 1, - AuthType: 5, - }) - ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - - keys, err := bfdHandler.DumpBfdKeys() - - Expect(err).To(BeNil()) - Expect(keys).To(HaveLen(2)) -} - func TestAddBfdEchoFunction(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -831,7 +756,7 @@ func TestAddBfdEchoFunction(t *testing.T) { } func TestAddBfdEchoFunctionInterfaceNotFound(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -846,7 +771,7 @@ func TestAddBfdEchoFunctionInterfaceNotFound(t *testing.T) { } func TestAddBfdEchoFunctionError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -862,7 +787,7 @@ func TestAddBfdEchoFunctionError(t *testing.T) { } func TestAddBfdEchoFunctionRetval(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "if", nil)) @@ -880,7 +805,7 @@ func TestAddBfdEchoFunctionRetval(t *testing.T) { } func TestDeleteBfdEchoFunction(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelEchoSourceReply{}) @@ -892,7 +817,7 @@ func TestDeleteBfdEchoFunction(t *testing.T) { } func TestDeleteBfdEchoFunctionError(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() 
ctx.MockVpp.MockReply(&bfd_api.BfdUDPSetEchoSourceReply{}) @@ -902,7 +827,7 @@ func TestDeleteBfdEchoFunctionError(t *testing.T) { } func TestDeleteBfdEchoFunctionRetval(t *testing.T) { - ctx, bfdHandler := bfdTestSetup(t) + ctx, bfdHandler, _ := bfdTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bfd_api.BfdUDPDelEchoSourceReply{ @@ -913,10 +838,11 @@ func TestDeleteBfdEchoFunctionRetval(t *testing.T) { Expect(err).ToNot(BeNil()) } -func bfdTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.BfdVppAPI) { +func bfdTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.BfdVppAPI, ifaceidx.SwIfIndexRW) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - bfdHandler, err := vppcalls.NewBfdVppHandler(ctx.MockChannel, log, nil) + ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "bfd-if-idx", nil)) + bfdHandler, err := vppcalls.NewBfdVppHandler(ctx.MockChannel, ifIndexes, log, nil) Expect(err).To(BeNil()) - return ctx, bfdHandler + return ctx, bfdHandler, ifIndexes } diff --git a/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go new file mode 100644 index 0000000000..46495661ce --- /dev/null +++ b/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go @@ -0,0 +1,193 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vppcalls + +import ( + "net" + "time" + + bfdapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" + "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" +) + +// BfdDetails is the wrapper structure for the BFD northbound API structure. +type BfdDetails struct { + Bfd *bfd.SingleHopBFD + Meta *BfdMeta +} + +// BfdMeta is combination of proto-modelled BFD data and VPP provided metadata +type BfdMeta struct { + *BfdSessionMeta + *BfdAuthKeyMeta +} + +func (handler *bfdVppHandler) DumpBfdSingleHop() (*BfdDetails, error) { + sessionDetails, err := handler.DumpBfdSessions() + if err != nil { + return nil, err + } + keyDetails, err := handler.DumpBfdAuthKeys() + if err != nil { + return nil, err + } + + return &BfdDetails{ + Bfd: &bfd.SingleHopBFD{ + Sessions: sessionDetails.Session, + Keys: keyDetails.AuthKeys, + }, + Meta: &BfdMeta{ + BfdSessionMeta: sessionDetails.Meta, + BfdAuthKeyMeta: keyDetails.Meta, + }, + }, nil +} + +// BfdSessionDetails is the wrapper structure for the BFD session northbound API structure. 
+type BfdSessionDetails struct { + Session []*bfd.SingleHopBFD_Session + Meta *BfdSessionMeta +} + +// BfdSessionMeta is combination of proto-modelled BFD session data and session interface to index map +type BfdSessionMeta struct { + SessionIfToIdx map[uint32]string +} + +func (handler *bfdVppHandler) DumpBfdSessions() (*BfdSessionDetails, error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(bfdapi.BfdUDPSessionDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + var sessions []*bfd.SingleHopBFD_Session + meta := &BfdSessionMeta{ + SessionIfToIdx: make(map[uint32]string), + } + + req := &bfdapi.BfdUDPSessionDump{} + sessionsRequest := handler.callsChannel.SendMultiRequest(req) + + for { + sessionDetails := &bfdapi.BfdUDPSessionDetails{} + stop, err := sessionsRequest.ReceiveReply(sessionDetails) + if stop { + break + } + if err != nil { + return nil, err + } + + ifName, _, exists := handler.ifIndexes.LookupName(sessionDetails.SwIfIndex) + if !exists { + handler.log.Warnf("BFD session dump: interface name not found for index %d", sessionDetails.SwIfIndex) + } + var srcAddr, dstAddr net.IP = sessionDetails.LocalAddr, sessionDetails.PeerAddr + + // Put session info + sessions = append(sessions, &bfd.SingleHopBFD_Session{ + Interface: ifName, + DestinationAddress: dstAddr.String(), + SourceAddress: srcAddr.String(), + DesiredMinTxInterval: sessionDetails.DesiredMinTx, + RequiredMinRxInterval: sessionDetails.RequiredMinRx, + DetectMultiplier: uint32(sessionDetails.DetectMult), + Authentication: &bfd.SingleHopBFD_Session_Authentication{ + KeyId: uint32(sessionDetails.BfdKeyID), + AdvertisedKeyId: uint32(sessionDetails.ConfKeyID), + }, + }) + // Put bfd interface info + meta.SessionIfToIdx[sessionDetails.SwIfIndex] = ifName + } + + return &BfdSessionDetails{ + Session: sessions, + Meta: meta, + }, nil +} + +func (handler *bfdVppHandler) DumpBfdUDPSessionsWithID(authKeyIndex uint32) (*BfdSessionDetails, error) { + details, err := handler.DumpBfdSessions() + if err != nil || len(details.Session) == 0 { + return nil, err + } + + var indexedSessions []*bfd.SingleHopBFD_Session + for _, session := range details.Session { + if session.Authentication != nil && session.Authentication.KeyId == authKeyIndex { + indexedSessions = append(indexedSessions, session) + } + } + + return &BfdSessionDetails{ + Session: indexedSessions, + }, nil +} + +// BfdAuthKeyDetails is the wrapper structure for the BFD authentication key northbound API structure. 
+type BfdAuthKeyDetails struct { + AuthKeys []*bfd.SingleHopBFD_Key + Meta *BfdAuthKeyMeta +} + +// BfdAuthKeyMeta is combination of proto-modelled BFD session data and key-to-usage map +type BfdAuthKeyMeta struct { + KeyIDToUseCount map[uint32]uint32 +} + +func (handler *bfdVppHandler) DumpBfdAuthKeys() (*BfdAuthKeyDetails, error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(bfdapi.BfdAuthKeysDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + var authKeys []*bfd.SingleHopBFD_Key + meta := &BfdAuthKeyMeta{ + KeyIDToUseCount: make(map[uint32]uint32), + } + + req := &bfdapi.BfdAuthKeysDump{} + keysRequest := handler.callsChannel.SendMultiRequest(req) + + for { + keyDetails := &bfdapi.BfdAuthKeysDetails{} + stop, err := keysRequest.ReceiveReply(keyDetails) + if stop { + break + } + if err != nil { + return nil, err + } + + // Put auth key info + authKeys = append(authKeys, &bfd.SingleHopBFD_Key{ + AuthKeyIndex: keyDetails.ConfKeyID, + Id: keyDetails.ConfKeyID, + AuthenticationType: func(authType uint8) bfd.SingleHopBFD_Key_AuthenticationType { + if authType == 4 { + return bfd.SingleHopBFD_Key_KEYED_SHA1 + } + return bfd.SingleHopBFD_Key_METICULOUS_KEYED_SHA1 + }(keyDetails.AuthType), + }) + // Put bfd key use count info + meta.KeyIDToUseCount[keyDetails.ConfKeyID] = keyDetails.UseCount + } + + return &BfdAuthKeyDetails{ + AuthKeys: authKeys, + Meta: meta, + }, nil +} diff --git a/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls_test.go new file mode 100644 index 0000000000..153c9894f1 --- /dev/null +++ b/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls_test.go @@ -0,0 +1,103 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vppcalls_test + +import ( + "net" + "testing" + + bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" + "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" + . 
"github.com/onsi/gomega" +) + +// TestDumpBfdUDPSessions tests BFD udp session dump +func TestDumpBfdUDPSessions(t *testing.T) { + ctx, bfdHandler, _ := bfdTestSetup(t) + defer ctx.TeardownTestCtx() + + ctx.MockVpp.MockReply(&bfd_api.BfdUDPSessionDetails{ + SwIfIndex: 1, + LocalAddr: net.ParseIP("10.0.0.1"), + PeerAddr: net.ParseIP("20.0.0.1"), + }) + ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) + + bfdConfig, err := bfdHandler.DumpBfdSessions() + + Expect(err).To(BeNil()) + Expect(bfdConfig.Session).To(HaveLen(1)) +} + +// TestDumpBfdUDPSessions tests BFD udp session dump where the result is filtered +// according to session authentication key ID +func TestDumpBfdUDPSessionsWithID(t *testing.T) { + ctx, bfdHandler, _ := bfdTestSetup(t) + defer ctx.TeardownTestCtx() + + // Authenticated wiht ID 1 + ctx.MockVpp.MockReply( + &bfd_api.BfdUDPSessionDetails{ + SwIfIndex: 1, + LocalAddr: net.ParseIP("10.0.0.1"), + PeerAddr: net.ParseIP("20.0.0.1"), + IsAuthenticated: 1, + BfdKeyID: 1, + }, + // Authenticated with ID 2 (filtered) + &bfd_api.BfdUDPSessionDetails{ + SwIfIndex: 2, + LocalAddr: net.ParseIP("10.0.0.2"), + PeerAddr: net.ParseIP("20.0.0.2"), + IsAuthenticated: 1, + BfdKeyID: 2, + }, + // Not authenticated + &bfd_api.BfdUDPSessionDetails{ + SwIfIndex: 3, + LocalAddr: net.ParseIP("10.0.0.3"), + PeerAddr: net.ParseIP("20.0.0.3"), + IsAuthenticated: 0, + }) + ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) + + bfdConfig, err := bfdHandler.DumpBfdUDPSessionsWithID(1) + + Expect(err).To(BeNil()) + Expect(bfdConfig.Session).To(HaveLen(1)) +} + +// TestDumpBfdKeys tests BFD key dump +func TestDumpBfdKeys(t *testing.T) { + ctx, bfdHandler, _ := bfdTestSetup(t) + defer ctx.TeardownTestCtx() + + ctx.MockVpp.MockReply(&bfd_api.BfdAuthKeysDetails{ + ConfKeyID: 1, + UseCount: 0, + AuthType: 4, + }, + &bfd_api.BfdAuthKeysDetails{ + ConfKeyID: 2, + UseCount: 1, + AuthType: 5, + }) + ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) + + bfdConfig, err := bfdHandler.DumpBfdAuthKeys() + + Expect(err).To(BeNil()) + Expect(bfdConfig.AuthKeys).To(HaveLen(2)) +} diff --git a/plugins/vpp/model/bfd/keys_agent.go b/plugins/vpp/model/bfd/keys_agent.go index 3b06442f63..b695861430 100644 --- a/plugins/vpp/model/bfd/keys_agent.go +++ b/plugins/vpp/model/bfd/keys_agent.go @@ -14,44 +14,67 @@ package bfd -// BfdSessionPrefix bfd-session/ -const BfdSessionPrefix = "vpp/config/v1/bfd/session/" +import "github.com/ligato/vpp-agent/plugins/vpp/model" -// BfdAuthKeysPrefix bfd-key/ -const BfdAuthKeysPrefix = "vpp/config/v1/bfd/auth-key/" +const ( + // restBfdKey is a REST path of a bfd + restBfdKey = model.ProtoApiVersion + "bfd" + // bfdSessionPrefix bfd-session/ + bfdSessionPrefix = "vpp/config" + model.ProtoApiVersion + "bfd/session/" + // restBfdSessionKey is a REST path of a bfd sessions + restBfdSessionKey = model.ProtoApiVersion + "bfd/sessions" + // bfdAuthKeysPrefix bfd-key/ + bfdAuthKeysPrefix = "vpp/config" + model.ProtoApiVersion + "bfd/auth-key/" + // restBfdAuthKey is a REST path of a bfd authentication keys + restBfdAuthKey = model.ProtoApiVersion + "bfd/authkeys" + // BfdEchoFunctionPrefix bfd-echo-function/ + bfdEchoFunctionPrefix = "vpp/config" + model.ProtoApiVersion + "bfd/echo-function" +) -// BfdEchoFunctionPrefix bfd-echo-function/ -const BfdEchoFunctionPrefix = "vpp/config/v1/bfd/echo-function" +// RestBfdKey returns prefix used in REST to dump bfd config +func RestBfdKey() string { + return restBfdKey +} // SessionKeyPrefix returns the prefix used in ETCD to store vpp bfd config. 
func SessionKeyPrefix() string { - return BfdSessionPrefix + return bfdSessionPrefix } -// AuthKeysKeyPrefix returns the prefix used in ETCD to store vpp bfd config. -func AuthKeysKeyPrefix() string { - return BfdAuthKeysPrefix +// SessionKey returns the prefix used in ETCD to store vpp bfd config +// of a particular bfd session in selected vpp instance. +func SessionKey(bfdSessionIfaceLabel string) string { + return bfdSessionPrefix + bfdSessionIfaceLabel } -// EchoFunctionKeyPrefix returns the prefix used in ETCD to store vpp bfd config. -func EchoFunctionKeyPrefix() string { - return BfdEchoFunctionPrefix +// RestSessionKey returns prefix used in REST to dump bfd session config +func RestSessionKey() string { + return restBfdSessionKey } -// SessionKey returns the prefix used in ETCD to store vpp bfd config -// of a particular bfd session in selected vpp instance. -func SessionKey(bfdSessionIfaceLabel string) string { - return BfdSessionPrefix + bfdSessionIfaceLabel +// AuthKeysKeyPrefix returns the prefix used in ETCD to store vpp bfd config. +func AuthKeysKeyPrefix() string { + return bfdAuthKeysPrefix } // AuthKeysKey returns the prefix used in ETCD to store vpp bfd config // of a particular bfd key in selected vpp instance. func AuthKeysKey(bfdKeyIDLabel string) string { - return BfdAuthKeysPrefix + bfdKeyIDLabel + return bfdAuthKeysPrefix + bfdKeyIDLabel +} + +// RestAuthKeysKey returns prefix used in REST to dump bfd authentication config +func RestAuthKeysKey() string { + return restBfdAuthKey +} + +// EchoFunctionKeyPrefix returns the prefix used in ETCD to store vpp bfd config. +func EchoFunctionKeyPrefix() string { + return bfdEchoFunctionPrefix } // EchoFunctionKey returns the prefix used in ETCD to store vpp bfd config // of a particular bfd echo function in selected vpp instance. func EchoFunctionKey(bfdEchoIfaceLabel string) string { - return BfdEchoFunctionPrefix + bfdEchoIfaceLabel + return bfdEchoFunctionPrefix + bfdEchoIfaceLabel } From b810e98b3be52453e56e8b91ce5ed0925ba26a6d Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 26 Jul 2018 08:01:27 +0200 Subject: [PATCH 052/174] updated readme Signed-off-by: Vladimir Lavor --- plugins/rest/README.md | 106 +++++++++++------- .../model/interfaces/keys_agent_interfaces.go | 14 +-- 2 files changed, 72 insertions(+), 48 deletions(-) diff --git a/plugins/rest/README.md b/plugins/rest/README.md index 26c15c634c..76ef02298d 100644 --- a/plugins/rest/README.md +++ b/plugins/rest/README.md @@ -1,6 +1,6 @@ # REST API Plugin -The `restplugin` is a core Agent Plugin used of exposing REST API for the following: +The `restplugin` is a core Agent Plugin used to expose REST API for the following: * Run VPP CLI commands * Exposes existing Northbound objects * Provides logging mechanism so that the VPPCLI command and response can be searched in elastic search @@ -11,16 +11,72 @@ curl -H "Content-Type: application/json" -X POST -d '{"vppclicommand":"show inte ``` ## Exposing existing Northbound objects + +Here is the list of supported REST URLs. If a configuration dump URL is used, the output is based on the proto model +structure for the given data type, together with VPP-specific data which are not part of the model (indexes for +interfaces or ACLs, internal names, etc.). Those data are in a separate section labeled `Meta`. + +**Access lists** + +URLs to obtain ACL IP/MACIP configuration are as follows.
+ +``` +curl http://0.0.0.0:9191/v1/acl/ip +curl http://0.0.0.0:9191/v1/acl/macip +``` + +It is also possible to obtain an example ACL configuration (not configured on the VPP). + +``` +curl http://0.0.0.0:9191/v1/acl/ip/example +curl http://0.0.0.0:9191/v1/acl/macip/example +``` + +**Interfaces** + +The REST plugin exposes configured interfaces, which can be shown all together, or only interfaces +of a specific type. + +``` +curl http://0.0.0.0:9191/v1/interfaces +curl http://0.0.0.0:9191/v1/interfaces/loopback +curl http://0.0.0.0:9191/v1/interfaces/ethernet +curl http://0.0.0.0:9191/v1/interfaces/memif +curl http://0.0.0.0:9191/v1/interfaces/tap +curl http://0.0.0.0:9191/v1/interfaces/vxlan +curl http://0.0.0.0:9191/v1/interfaces/afpacket +``` + +**BFD** + +The REST plugin allows dumping bidirectional forwarding detection sessions, authentication keys, +or the whole BFD configuration. + +``` +curl http://0.0.0.0:9191/v1/bfd +curl http://0.0.0.0:9191/v1/bfd/sessions +curl http://0.0.0.0:9191/v1/bfd/authkeys +``` + +**L2 plugin** + +The L2 plugin supports bridge domains, FIBs and cross connects. It is also possible to get all +the bridge domain IDs. + +``` +curl http://0.0.0.0:9191/v1/bdid +curl http://0.0.0.0:9191/v1/bd +curl http://0.0.0.0:9191/v1/fibs +curl http://0.0.0.0:9191/v1/xc +``` + +**L3 plugin** + +ARPs and static routes are exposed via REST: + ``` -curl http://0.0.0.0:9191/interfaces -curl http://0.0.0.0:9191/bridgedomainids -curl http://0.0.0.0:9191/bridgedomains -curl http://0.0.0.0:9191/fibs -curl http://0.0.0.0:9191/xconnectpairs curl http://0.0.0.0:9191/staticroutes -curl http://0.0.0.0:9191/acl/interface/ // Get ACLs for interface -curl http://0.0.0.0:9191/acl/ip // Get all IP ACLs -curl http://0.0.0.0:9191/acl/ip/example // Get an example ACL +curl http://0.0.0.0:9191/arps ``` Configure an IP ACL: @@ -28,37 +84,5 @@ Configure an IP ACL: curl -H "Content-Type: application/json" -X POST -d '' http://0.0.0.0:9191/interface/acl/ip ``` -For example: -``` -curl -H "Content-Type: application/json" -X POST -d '{ - "acl_name": "example" - "rules": [ - { - "rule_name": "acl1_rule1", { "actions": { - "acl_action": 1, - "match": { - "ip_rule": { - "ip": { - "destination_network": "1.2.3.4/24", - "source_network": "5.6.7.8/24" - }, - "tcp": { - "destination_port_range": { - "lower_port": 80, - "upper_port": 8080 - }, - "source_port_range": { - "lower_port": 10, - "upper_port": 1010 - }, - "tcp_flags_mask": 255, - "tcp_flags_value": 9 - } - } - } - } - ] -}' http://0.0.0.0:9191/interface/acl/ip -``` ## Logging mechanism The REST API request is logged to stdout. The log contains VPPCLI command and VPPCLI response. It is searchable in elastic search using "VPPCLI".
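Since the inline payload example no longer appears in this README section, a minimal, syntactically valid request for the ACL POST handler is sketched below for reference. This is an illustration only: the field names mirror the example formerly shown here, the authoritative schema is the acl proto model, and the ACL name, networks and action value are placeholders.

```
curl -H "Content-Type: application/json" -X POST -d '{
  "acl_name": "example",
  "rules": [
    {
      "rule_name": "acl1_rule1",
      "actions": {
        "acl_action": 1
      },
      "match": {
        "ip_rule": {
          "ip": {
            "destination_network": "1.2.3.4/24",
            "source_network": "5.6.7.8/24"
          }
        }
      }
    }
  ]
}' http://0.0.0.0:9191/interface/acl/ip
```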
\ No newline at end of file diff --git a/plugins/vpp/model/interfaces/keys_agent_interfaces.go b/plugins/vpp/model/interfaces/keys_agent_interfaces.go index bc254af361..8f62b4e85a 100644 --- a/plugins/vpp/model/interfaces/keys_agent_interfaces.go +++ b/plugins/vpp/model/interfaces/keys_agent_interfaces.go @@ -29,19 +29,19 @@ const ( // ifErrorPrefix is interface error prefix ifErrorPrefix = "vpp/status" + model.ProtoApiVersion + "interface/error/" // restInterface is rest interface path - restInterface = model.ProtoApiVersion + "interface" + restInterface = model.ProtoApiVersion + "interfaces" // restLoopback is path for loopback interface - restLoopback = model.ProtoApiVersion + "interface/loopback" + restLoopback = model.ProtoApiVersion + "interfaces/loopback" // restLoopback is path for physical interface - restEthernet = model.ProtoApiVersion + "interface/ethernet" + restEthernet = model.ProtoApiVersion + "interfaces/ethernet" // restLoopback is path for memif interface - restMemif = model.ProtoApiVersion + "interface/memif" + restMemif = model.ProtoApiVersion + "interfaces/memif" // restLoopback is path for tap interface - restTap = model.ProtoApiVersion + "interface/tap" + restTap = model.ProtoApiVersion + "interfaces/tap" // restAfPacket is path for af-packet interface - restAfPacket = model.ProtoApiVersion + "interface/afpacket" + restAfPacket = model.ProtoApiVersion + "interfaces/afpacket" // restLoopback is path for vxlan interface - restVxLan = model.ProtoApiVersion + "interface/vxlan" + restVxLan = model.ProtoApiVersion + "interfaces/vxlan" ) // InterfaceKeyPrefix returns the prefix used in ETCD to store vpp interfaces config. From e677621dbb04cc6df109063691132e15077777b1 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 26 Jul 2018 09:25:37 +0200 Subject: [PATCH 053/174] better error propagation Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 108 ++++++++++++---------------------- 1 file changed, 36 insertions(+), 72 deletions(-) diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index b8688c0247..e812a4172a 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -40,17 +40,13 @@ import ( // Registers access list REST handlers func (plugin *Plugin) registerAccessListHandlers() error { // GET IP ACLs - if err := plugin.registerHTTPHandler(acl.RestIPKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(acl.RestIPKey(), GET, func() (interface{}, error) { return plugin.aclHandler.DumpIPACL(nil) - }); err != nil { - return err - } + }) // GET MACIP ACLs - if err := plugin.registerHTTPHandler(acl.RestMACIPKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(acl.RestMACIPKey(), GET, func() (interface{}, error) { return plugin.aclHandler.DumpMacIPAcls() - }); err != nil { - return err - } + }) // GET IP ACL example plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPExampleKey(), plugin.exampleIpACLGetHandler, GET) // GET MACIP ACL example @@ -62,13 +58,11 @@ func (plugin *Plugin) registerAccessListHandlers() error { // Registers interface REST handlers func (plugin *Plugin) registerInterfaceHandlers() error { // GET all interfaces - if err := plugin.registerHTTPHandler(interfaces.RestInterfaceKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(interfaces.RestInterfaceKey(), GET, func() (interface{}, error) { return plugin.ifHandler.DumpInterfaces() - }); err != nil { - return err - } + }) // GET loopback interfaces - if err := 
plugin.registerHTTPHandler(interfaces.RestLoopbackKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(interfaces.RestLoopbackKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_SOFTWARE_LOOPBACK { @@ -76,11 +70,9 @@ func (plugin *Plugin) registerInterfaceHandlers() error { } } return ifs, err - }); err != nil { - return err - } + }) // GET ethernet interfaces - if err := plugin.registerHTTPHandler(interfaces.RestEthernetKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(interfaces.RestEthernetKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_ETHERNET_CSMACD { @@ -88,11 +80,9 @@ func (plugin *Plugin) registerInterfaceHandlers() error { } } return ifs, err - }); err != nil { - return err - } + }) // GET memif interfaces - if err := plugin.registerHTTPHandler(interfaces.RestMemifKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(interfaces.RestMemifKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_MEMORY_INTERFACE { @@ -100,11 +90,9 @@ func (plugin *Plugin) registerInterfaceHandlers() error { } } return ifs, err - }); err != nil { - return err - } + }) // GET tap interfaces - if err := plugin.registerHTTPHandler(interfaces.RestTapKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(interfaces.RestTapKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_TAP_INTERFACE { @@ -112,11 +100,9 @@ func (plugin *Plugin) registerInterfaceHandlers() error { } } return ifs, err - }); err != nil { - return err - } + }) // GET af-packet interfaces - if err := plugin.registerHTTPHandler(interfaces.RestAfPAcketKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(interfaces.RestAfPAcketKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_AF_PACKET_INTERFACE { @@ -124,11 +110,9 @@ func (plugin *Plugin) registerInterfaceHandlers() error { } } return ifs, err - }); err != nil { - return err - } + }) // GET VxLAN interfaces - if err := plugin.registerHTTPHandler(interfaces.RestVxLanKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(interfaces.RestVxLanKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_VXLAN_TUNNEL { @@ -136,32 +120,24 @@ func (plugin *Plugin) registerInterfaceHandlers() error { } } return ifs, err - }); err != nil { - return err - } + }) return nil } func (plugin *Plugin) registerBfdHandlers() error { // GET BFD configuration - if err := plugin.registerHTTPHandler(bfd.RestBfdKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(bfd.RestBfdKey(), GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSingleHop() - }); err != nil { - return err - } + }) // GET BFD sessions - if err := plugin.registerHTTPHandler(bfd.RestSessionKey(), GET, func() (interface{}, error) { + 
plugin.registerHTTPHandler(bfd.RestSessionKey(), GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSessions() - }); err != nil { - return err - } + }) // GET BFD authentication keys - if err := plugin.registerHTTPHandler(bfd.RestAuthKeysKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(bfd.RestAuthKeysKey(), GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdAuthKeys() - }); err != nil { - return err - } + }) return nil } @@ -169,53 +145,41 @@ func (plugin *Plugin) registerBfdHandlers() error { // Registers L2 plugin REST handlers func (plugin *Plugin) registerL2Handlers() error { // GET bridge domain IDs - if err := plugin.registerHTTPHandler(l2.RestBridgeDomainIDKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(l2.RestBridgeDomainIDKey(), GET, func() (interface{}, error) { return plugin.bdHandler.DumpBridgeDomainIDs() - }); err != nil { - return err - } + }) // GET bridge domains - if err := plugin.registerHTTPHandler(l2.RestBridgeDomainKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(l2.RestBridgeDomainKey(), GET, func() (interface{}, error) { return plugin.bdHandler.DumpBridgeDomains() - }); err != nil { - return err - } + }) // GET FIB entries - if err := plugin.registerHTTPHandler(l2.RestFibKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(l2.RestFibKey(), GET, func() (interface{}, error) { return plugin.fibHandler.DumpFIBTableEntries() - }); err != nil { - return err - } + }) // GET cross connects - if err := plugin.registerHTTPHandler(l2.RestXConnectKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(l2.RestXConnectKey(), GET, func() (interface{}, error) { return plugin.xcHandler.DumpXConnectPairs() - }); err != nil { - return err - } + }) return nil } // registerHTTPHandler is common register method for all handlers -func (plugin *Plugin) registerHTTPHandler(key, method string, f func() (interface{}, error)) error { - var err error +func (plugin *Plugin) registerHTTPHandler(key, method string, f func() (interface{}, error)) { handlerFunc := func(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { res, err := f() if err != nil { plugin.Deps.Log.Errorf("Error: %v", err) - err = formatter.JSON(w, http.StatusInternalServerError, err) + w.Write([]byte("500 Internal server error: " + err.Error())) + formatter.JSON(w, http.StatusInternalServerError, err) + return } - plugin.Deps.Log.Debug(res) formatter.JSON(w, http.StatusOK, res) } } - if err != nil { - return err - } plugin.HTTPHandlers.RegisterHTTPHandler(key, handlerFunc, method) - return nil } // staticRoutesGetHandler - used to get list of all static routes From c7a9232d3adff6948b95f5ff755dce2e6e83c138 Mon Sep 17 00:00:00 2001 From: Ondrej Fabry Date: Thu, 26 Jul 2018 09:45:58 +0200 Subject: [PATCH 054/174] Use specific version of markdown-link-check because latest is broken Signed-off-by: Ondrej Fabry --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ddd0a48fae..5b8e93ec3c 100644 --- a/Makefile +++ b/Makefile @@ -198,7 +198,7 @@ format: # Get link check tool get-linkcheck: sudo apt-get install npm - npm install -g markdown-link-check + npm install -g markdown-link-check@3.6.2 # Validate links in markdown files check-links: get-linkcheck From 49aff14af3d252889f881385cd95528915f875b3 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 26 Jul 2018 09:47:55 +0200 Subject: [PATCH 055/174] rest 
keys moved to rest plugin Signed-off-by: Vladimir Lavor --- plugins/rest/plugin_impl_rest.go | 30 ++-- plugins/rest/rest_handlers.go | 101 +++-------- plugins/rest/url/rest_url.go | 157 ++++++++++++++++++ plugins/vpp/ifplugin/data_resync.go | 2 +- plugins/vpp/model/acl/keys_agent_acl.go | 32 +--- plugins/vpp/model/api_version.go | 18 -- plugins/vpp/model/bfd/keys_agent.go | 29 +--- .../model/interfaces/keys_agent_interfaces.go | 57 +------ plugins/vpp/model/l2/keys_agent_l2.go | 36 +--- 9 files changed, 201 insertions(+), 261 deletions(-) create mode 100644 plugins/rest/url/rest_url.go delete mode 100644 plugins/vpp/model/api_version.go diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index ea7a5da0bb..c622dd8cea 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -22,13 +22,11 @@ import ( "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/plugins/govppmux" + "github.com/ligato/vpp-agent/plugins/rest/url" "github.com/ligato/vpp-agent/plugins/vpp" aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) const ( @@ -111,19 +109,19 @@ func (plugin *Plugin) Init() (err error) { } plugin.indexItems = []indexItem{ - {Name: "ACL IP", Path: acl.RestIPKey()}, - {Name: "ACL MACIP", Path: acl.RestMACIPKey()}, - {Name: "Interfaces", Path: interfaces.RestInterfaceKey()}, - {Name: "Loopback interfaces", Path: interfaces.RestLoopbackKey()}, - {Name: "Ethernet interfaces", Path: interfaces.RestEthernetKey()}, - {Name: "Memif interfaces", Path: interfaces.RestMemifKey()}, - {Name: "Tap interfaces", Path: interfaces.RestTapKey()}, - {Name: "VxLAN interfaces", Path: interfaces.RestVxLanKey()}, - {Name: "Af-packet nterfaces", Path: interfaces.RestAfPAcketKey()}, - {Name: "Bridge domains", Path: l2.RestBridgeDomainKey()}, - {Name: "Bridge domain IDs", Path: l2.RestBridgeDomainIDKey()}, - {Name: "L2Fibs", Path: l2.RestFibKey()}, - {Name: "XConnectorPairs", Path: l2.RestXConnectKey()}, + {Name: "ACL IP", Path: url.RestIPKey()}, + {Name: "ACL MACIP", Path: url.RestMACIPKey()}, + {Name: "Interfaces", Path: url.RestInterfaceKey()}, + {Name: "Loopback interfaces", Path: url.RestLoopbackKey()}, + {Name: "Ethernet interfaces", Path: url.RestEthernetKey()}, + {Name: "Memif interfaces", Path: url.RestMemifKey()}, + {Name: "Tap interfaces", Path: url.RestTapKey()}, + {Name: "VxLAN interfaces", Path: url.RestVxLanKey()}, + {Name: "Af-packet interfaces", Path: url.RestAfPAcketKey()}, + {Name: "Bridge domains", Path: url.RestBridgeDomainKey()}, + {Name: "Bridge domain IDs", Path: url.RestBridgeDomainIDKey()}, + {Name: "L2Fibs", Path: url.RestFibKey()}, + {Name: "XConnectorPairs", Path: url.RestXConnectKey()}, {Name: "ARPs", Path: "/arps"}, {Name: "Static routes", Path: "/staticroutes"}, diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index e812a4172a..0418730c2f 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -29,28 +29,27 @@ import ( "github.com/ligato/vpp-agent/plugins/govppmux/vppcalls" "github.com/unrolled/render" + "github.com/ligato/vpp-agent/plugins/rest/url" aclcalls
"github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" ) // Registers access list REST handlers func (plugin *Plugin) registerAccessListHandlers() error { // GET IP ACLs - plugin.registerHTTPHandler(acl.RestIPKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestIPKey(), GET, func() (interface{}, error) { return plugin.aclHandler.DumpIPACL(nil) }) // GET MACIP ACLs - plugin.registerHTTPHandler(acl.RestMACIPKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestMACIPKey(), GET, func() (interface{}, error) { return plugin.aclHandler.DumpMacIPAcls() }) // GET IP ACL example - plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestIPExampleKey(), plugin.exampleIpACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(url.RestIPExampleKey(), plugin.exampleIpACLGetHandler, GET) // GET MACIP ACL example - plugin.HTTPHandlers.RegisterHTTPHandler(acl.RestMACIPExampleKey(), plugin.exampleMacIpACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(url.RestMACIPExampleKey(), plugin.exampleMacIpACLGetHandler, GET) return nil } @@ -58,11 +57,11 @@ func (plugin *Plugin) registerAccessListHandlers() error { // Registers interface REST handlers func (plugin *Plugin) registerInterfaceHandlers() error { // GET all interfaces - plugin.registerHTTPHandler(interfaces.RestInterfaceKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestInterfaceKey(), GET, func() (interface{}, error) { return plugin.ifHandler.DumpInterfaces() }) // GET loopback interfaces - plugin.registerHTTPHandler(interfaces.RestLoopbackKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestLoopbackKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_SOFTWARE_LOOPBACK { @@ -72,7 +71,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET ethernet interfaces - plugin.registerHTTPHandler(interfaces.RestEthernetKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestEthernetKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_ETHERNET_CSMACD { @@ -82,7 +81,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET memif interfaces - plugin.registerHTTPHandler(interfaces.RestMemifKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestMemifKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_MEMORY_INTERFACE { @@ -92,7 +91,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET tap interfaces - plugin.registerHTTPHandler(interfaces.RestTapKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestTapKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_TAP_INTERFACE { @@ -102,7 +101,7 @@ func (plugin *Plugin) 
registerInterfaceHandlers() error { return ifs, err }) // GET af-packet interfaces - plugin.registerHTTPHandler(interfaces.RestAfPAcketKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestAfPAcketKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_AF_PACKET_INTERFACE { @@ -112,7 +111,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET VxLAN interfaces - plugin.registerHTTPHandler(interfaces.RestVxLanKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestVxLanKey(), GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_VXLAN_TUNNEL { @@ -127,15 +126,15 @@ func (plugin *Plugin) registerInterfaceHandlers() error { func (plugin *Plugin) registerBfdHandlers() error { // GET BFD configuration - plugin.registerHTTPHandler(bfd.RestBfdKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestBfdKey(), GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSingleHop() }) // GET BFD sessions - plugin.registerHTTPHandler(bfd.RestSessionKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestSessionKey(), GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSessions() }) // GET BFD authentication keys - plugin.registerHTTPHandler(bfd.RestAuthKeysKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestAuthKeysKey(), GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdAuthKeys() }) @@ -145,19 +144,19 @@ func (plugin *Plugin) registerBfdHandlers() error { // Registers L2 plugin REST handlers func (plugin *Plugin) registerL2Handlers() error { // GET bridge domain IDs - plugin.registerHTTPHandler(l2.RestBridgeDomainIDKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestBridgeDomainIDKey(), GET, func() (interface{}, error) { return plugin.bdHandler.DumpBridgeDomainIDs() }) // GET bridge domains - plugin.registerHTTPHandler(l2.RestBridgeDomainKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestBridgeDomainKey(), GET, func() (interface{}, error) { return plugin.bdHandler.DumpBridgeDomains() }) // GET FIB entries - plugin.registerHTTPHandler(l2.RestFibKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestFibKey(), GET, func() (interface{}, error) { return plugin.fibHandler.DumpFIBTableEntries() }) // GET cross connects - plugin.registerHTTPHandler(l2.RestXConnectKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(url.RestXConnectKey(), GET, func() (interface{}, error) { return plugin.xcHandler.DumpXConnectPairs() }) @@ -294,68 +293,6 @@ func (plugin *Plugin) interfaceACLGetHandler(formatter *render.Render) http.Hand } } -// ipACLGetHandler - used to get configuration of IP ACLs -func (plugin *Plugin) ipACLGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting acls") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil) - if err != nil { - 
plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := aclHandler.DumpIPACL(nil) - if err != nil { - plugin.Deps.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - - plugin.Deps.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) - } -} - -func (plugin *Plugin) macipACLGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - plugin.Deps.Log.Info("Getting macip acls") - - // create an API channel - ch, err := plugin.Deps.GoVppmux.NewAPIChannel() - defer ch.Close() - if err != nil { - plugin.Deps.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - aclHandler, err := aclcalls.NewAclVppHandler(ch, ch, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := aclHandler.DumpMACIPACL(nil) - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) - } -} - // exampleACLGetHandler - used to get an example ACL configuration func (plugin *Plugin) exampleIpACLGetHandler(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { diff --git a/plugins/rest/url/rest_url.go b/plugins/rest/url/rest_url.go new file mode 100644 index 0000000000..4e7fc3d126 --- /dev/null +++ b/plugins/rest/url/rest_url.go @@ -0,0 +1,157 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package url + +// Access list REST keys +const ( + // REST Acl IP prefix + restAclIP = "/v1/acl/ip" + // REST Acl IP example prefix + restAclIPExample = "/v1/acl/ip/example" + // REST Acl MACIP prefix + restAclMACIP = "/v1/acl/macip" + // REST Acl MACIP example prefix + restAclMACIPExample = "/v1/acl/macip/example" +) + +// RestIPKey returns prefix used in REST to dump ACL IP config +func RestIPKey() string { + return restAclIP +} + +// RestIPExampleKey returns prefix used in REST to dump ACL IP example config +func RestIPExampleKey() string { + return restAclIPExample +} + +// RestMACIPKey returns prefix used in REST to dump ACL MACIP config +func RestMACIPKey() string { + return restAclMACIP +} + +// RestMACIPExampleKey returns prefix used in REST to dump ACL MACIP example config +func RestMACIPExampleKey() string { + return restAclMACIPExample +} + +// BFD REST keys +const ( + // restBfdKey is a REST path of a bfd + restBfdKey = "/v1/bfd" + // restBfdSessionKey is a REST path of bfd sessions + restBfdSessionKey = "/v1/bfd/sessions" + // restBfdAuthKey is a REST path of bfd authentication keys + restBfdAuthKey = "/v1/bfd/authkeys" +) + +// RestBfdKey returns prefix used in REST to dump bfd config +func RestBfdKey() string { + return restBfdKey +} + +// RestSessionKey returns prefix used in REST to dump bfd session config +func RestSessionKey() string { + return restBfdSessionKey +} + +// RestAuthKeysKey returns prefix used in REST to dump bfd authentication config +func RestAuthKeysKey() string { + return restBfdAuthKey +} + +// Interface REST keys +const ( + // restInterface is rest interface path + restInterface = "/v1/interfaces" + // restLoopback is path for loopback interface + restLoopback = "/v1/interfaces/loopback" + // restEthernet is path for physical interface + restEthernet = "/v1/interfaces/ethernet" + // restMemif is path for memif interface + restMemif = "/v1/interfaces/memif" + // restTap is path for tap interface + restTap = "/v1/interfaces/tap" + // restAfPacket is path for af-packet interface + restAfPacket = "/v1/interfaces/afpacket" + // restVxLan is path for vxlan interface + restVxLan = "/v1/interfaces/vxlan" +) + +// RestInterfaceKey returns prefix used in REST to dump interface config +func RestInterfaceKey() string { + return restInterface +} + +// RestLoopbackKey returns prefix used in REST to dump loopback interface config +func RestLoopbackKey() string { + return restLoopback +} + +// RestEthernetKey returns prefix used in REST to dump ethernet interface config +func RestEthernetKey() string { + return restEthernet +} + +// RestMemifKey returns prefix used in REST to dump memif interface config +func RestMemifKey() string { + return restMemif +} + +// RestTapKey returns prefix used in REST to dump tap interface config +func RestTapKey() string { + return restTap +} + +// RestAfPAcketKey returns prefix used in REST to dump af-packet interface config +func RestAfPAcketKey() string { + return restAfPacket +} + +// RestVxLanKey returns prefix used in REST to dump VxLAN interface config +func RestVxLanKey() string { + return restVxLan +} + +// L2 plugin +const ( + // restBd is rest bridge domain path + restBd = "/v1/bd" + // restBdId is rest bridge domain ID path + restBdId = "/v1/bdid" + // restFib is rest FIB path + restFib = "/v1/fib" + // restXc is rest cross-connect path + restXc = "/v1/xc" +) + +// RestBridgeDomainKey returns the key used in REST to dump bridge domains.
+func RestBridgeDomainKey() string { + return restBd +} + +// RestBridgeDomainIDKey returns the key used in REST to dump bridge domain IDs. +func RestBridgeDomainIDKey() string { + return restBdId +} + +// RestFibKey returns the prefix used in REST to dump vpp fib table entry config. +func RestFibKey() string { + return restFib +} + +// RestXConnectKey returns the prefix used in REST to dump vpp xConnect pair config. +func RestXConnectKey() string { + return restXc +} diff --git a/plugins/vpp/ifplugin/data_resync.go b/plugins/vpp/ifplugin/data_resync.go index 9d0345e857..72b4f5a378 100644 --- a/plugins/vpp/ifplugin/data_resync.go +++ b/plugins/vpp/ifplugin/data_resync.go @@ -233,7 +233,7 @@ func (plugin *BFDConfigurator) ResyncSession(nbSessions []*bfd.SingleHopBFD_Sess // Dump all BFD vppSessions vppBfdSessions, err := plugin.bfdHandler.DumpBfdSessions() - if err != nil || vppBfdSessions == nil { + if err != nil { return err } diff --git a/plugins/vpp/model/acl/keys_agent_acl.go b/plugins/vpp/model/acl/keys_agent_acl.go index 7299e8c547..24a8f5d125 100644 --- a/plugins/vpp/model/acl/keys_agent_acl.go +++ b/plugins/vpp/model/acl/keys_agent_acl.go @@ -14,19 +14,9 @@ package acl -import "github.com/ligato/vpp-agent/plugins/vpp/model" - const ( // DB key prefix - aclPrefix = "vpp/config" + model.ProtoApiVersion + "acl/" - // REST Acl IP prefix - restAclIP = model.ProtoApiVersion + "acl/ip" - // REST Acl IP example prefix - restAclIPExample = model.ProtoApiVersion + "acl/ip/example" - // REST Acl MACIP prefix - restAclMACIP = model.ProtoApiVersion + "acl/macip" - // REST Acl MACIP example prefix - restAclMACIPExample = model.ProtoApiVersion + "acl/macip/example" + aclPrefix = "vpp/config/v1/acl/" ) // KeyPrefix returns the prefix used in ETCD to store vpp ACLs config. @@ -39,23 +29,3 @@ func KeyPrefix() string { func Key(aclName string) string { return aclPrefix + aclName } - -// RestIPKey returns prefix used in REST to dump ACL IP config -func RestIPKey() string { - return restAclIP -} - -// RestIPExampleKey returns prefix used in REST to dump ACL IP example config -func RestIPExampleKey() string { - return restAclIPExample -} - -// RestMACIPKey returns prefix used in REST to dump ACL MACIP config -func RestMACIPKey() string { - return restAclMACIP -} - -// RestMACIPExampleKey returns prefix used in REST to dump ACL MACIP example config -func RestMACIPExampleKey() string { - return restAclMACIPExample -} diff --git a/plugins/vpp/model/api_version.go b/plugins/vpp/model/api_version.go deleted file mode 100644 index 62d7083929..0000000000 --- a/plugins/vpp/model/api_version.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -// ProtoApiVersion is current version of the northbound api -const ProtoApiVersion = "/v1/" diff --git a/plugins/vpp/model/bfd/keys_agent.go b/plugins/vpp/model/bfd/keys_agent.go index b695861430..754b7f63ae 100644 --- a/plugins/vpp/model/bfd/keys_agent.go +++ b/plugins/vpp/model/bfd/keys_agent.go @@ -14,28 +14,15 @@ package bfd -import "github.com/ligato/vpp-agent/plugins/vpp/model" - const ( - // restBfdKey is a REST path of a bfd - restBfdKey = model.ProtoApiVersion + "bfd" // bfdSessionPrefix bfd-session/ - bfdSessionPrefix = "vpp/config" + model.ProtoApiVersion + "bfd/session/" - // restBfdSessionKey is a REST path of a bfd sessions - restBfdSessionKey = model.ProtoApiVersion + "bfd/sessions" + bfdSessionPrefix = "vpp/config/v1/bfd/session/" // bfdAuthKeysPrefix bfd-key/ - bfdAuthKeysPrefix = "vpp/config" + model.ProtoApiVersion + "bfd/auth-key/" - // restBfdAuthKey is a REST path of a bfd authentication keys - restBfdAuthKey = model.ProtoApiVersion + "bfd/authkeys" + bfdAuthKeysPrefix = "vpp/config/v1/bfd/auth-key/" // BfdEchoFunctionPrefix bfd-echo-function/ - bfdEchoFunctionPrefix = "vpp/config" + model.ProtoApiVersion + "bfd/echo-function" + bfdEchoFunctionPrefix = "vpp/config/v1/bfd/echo-function" ) -// RestBfdKey returns prefix used in REST to dump bfd config -func RestBfdKey() string { - return restBfdKey -} - // SessionKeyPrefix returns the prefix used in ETCD to store vpp bfd config. func SessionKeyPrefix() string { return bfdSessionPrefix @@ -47,11 +34,6 @@ func SessionKey(bfdSessionIfaceLabel string) string { return bfdSessionPrefix + bfdSessionIfaceLabel } -// RestSessionKey returns prefix used in REST to dump bfd session config -func RestSessionKey() string { - return restBfdSessionKey -} - // AuthKeysKeyPrefix returns the prefix used in ETCD to store vpp bfd config. func AuthKeysKeyPrefix() string { return bfdAuthKeysPrefix @@ -63,11 +45,6 @@ func AuthKeysKey(bfdKeyIDLabel string) string { return bfdAuthKeysPrefix + bfdKeyIDLabel } -// RestAuthKeysKey returns prefix used in REST to dump bfd authentication config -func RestAuthKeysKey() string { - return restBfdAuthKey -} - // EchoFunctionKeyPrefix returns the prefix used in ETCD to store vpp bfd config. 
func EchoFunctionKeyPrefix() string { return bfdEchoFunctionPrefix diff --git a/plugins/vpp/model/interfaces/keys_agent_interfaces.go b/plugins/vpp/model/interfaces/keys_agent_interfaces.go index 8f62b4e85a..88fce87b19 100644 --- a/plugins/vpp/model/interfaces/keys_agent_interfaces.go +++ b/plugins/vpp/model/interfaces/keys_agent_interfaces.go @@ -17,31 +17,15 @@ package interfaces import ( "fmt" "strings" - - "github.com/ligato/vpp-agent/plugins/vpp/model" ) const ( // interfacePrefix is interface prefix - interfacePrefix = "vpp/config" + model.ProtoApiVersion + "interface/" + interfacePrefix = "vpp/config/v1/interface/" // ifStatePrefix is interface state prefix - ifStatePrefix = "vpp/status" + model.ProtoApiVersion + "interface/" + ifStatePrefix = "vpp/status/v1/interface/" // ifErrorPrefix is interface error prefix - ifErrorPrefix = "vpp/status" + model.ProtoApiVersion + "interface/error/" - // restInterface is rest interface path - restInterface = model.ProtoApiVersion + "interfaces" - // restLoopback is path for loopback interface - restLoopback = model.ProtoApiVersion + "interfaces/loopback" - // restLoopback is path for physical interface - restEthernet = model.ProtoApiVersion + "interfaces/ethernet" - // restLoopback is path for memif interface - restMemif = model.ProtoApiVersion + "interfaces/memif" - // restLoopback is path for tap interface - restTap = model.ProtoApiVersion + "interfaces/tap" - // restAfPacket is path for af-packet interface - restAfPacket = model.ProtoApiVersion + "interfaces/afpacket" - // restLoopback is path for vxlan interface - restVxLan = model.ProtoApiVersion + "interfaces/vxlan" + ifErrorPrefix = "vpp/status/v1/interface/error/" ) // InterfaceKeyPrefix returns the prefix used in ETCD to store vpp interfaces config. @@ -85,38 +69,3 @@ func InterfaceStateKeyPrefix() string { func InterfaceStateKey(ifaceLabel string) string { return ifStatePrefix + ifaceLabel } - -// RestInterfaceKey returns prefix used in REST to dump interface config -func RestInterfaceKey() string { - return restInterface -} - -// RestLoopbackKey returns prefix used in REST to dump loopback interface config -func RestLoopbackKey() string { - return restLoopback -} - -// RestEthernetKey returns prefix used in REST to dump ethernet interface config -func RestEthernetKey() string { - return restEthernet -} - -// RestMemifKey returns prefix used in REST to dump memif interface config -func RestMemifKey() string { - return restMemif -} - -// RestTapKey returns prefix used in REST to dump tap interface config -func RestTapKey() string { - return restTap -} - -// RestAfPAcketKey returns prefix used in REST to dump af-packet interface config -func RestAfPAcketKey() string { - return restAfPacket -} - -// RestVxLanKey returns prefix used in REST to dump VxLAN interface config -func RestVxLanKey() string { - return restVxLan -} diff --git a/plugins/vpp/model/l2/keys_agent_l2.go b/plugins/vpp/model/l2/keys_agent_l2.go index 2addfd7dfa..678da93287 100644 --- a/plugins/vpp/model/l2/keys_agent_l2.go +++ b/plugins/vpp/model/l2/keys_agent_l2.go @@ -17,30 +17,20 @@ package l2 import ( "fmt" "strings" - - "github.com/ligato/vpp-agent/plugins/vpp/model" ) // Prefixes const ( // bdPrefix is the relative key prefix for bridge domains. - bdPrefix = "vpp/config" + model.ProtoApiVersion + "bd/" + bdPrefix = "vpp/config/v1/bd/" // bdStatePrefix is the relative key prefix for bridge domain state. 
- bdStatePrefix = "vpp/status" + model.ProtoApiVersion + "bd/" + bdStatePrefix = "vpp/status/v1/bd/" // bdErrPrefix is the relative key prefix for the bridge domain error. - bdErrPrefix = "vpp/status" + model.ProtoApiVersion + "bd/error/" - // restBd is rest bridge domain path - restBd = model.ProtoApiVersion + "bd" - // restBdId is rest bridge domain ID path - restBdId = model.ProtoApiVersion + "bdid" + bdErrPrefix = "vpp/status/v1/bd/error/" // fibPrefix is the relative key prefix for FIB table entries. fibPrefix = "vpp/config/v1/bd/{bd}/fib/" - // restFib is rest FIB path - restFib = model.ProtoApiVersion + "fib" // xConnectPrefix is the relative key prefix for xconnects. xConnectPrefix = "vpp/config/v1/xconnect/" - // restXc is rest cross-connect path - restXc = model.ProtoApiVersion + "xc" ) // BridgeDomainKeyPrefix returns the prefix used in ETCD to store vpp bridge domain config. @@ -85,16 +75,6 @@ func ParseBDNameFromKey(key string) (name string, err error) { return key, fmt.Errorf("wrong format of the key %s", key) } -// RestBridgeDomainKey returns the key used in REST to dump bridge domains. -func RestBridgeDomainKey() string { - return restBd -} - -// RestBridgeDomainIDKey returns the key used in REST to dump bridge domain IDs. -func RestBridgeDomainIDKey() string { - return restBdId -} - // FibKeyPrefix returns the prefix used in ETCD to store vpp fib table entry config. func FibKeyPrefix() string { return fibPrefix @@ -118,11 +98,6 @@ func ParseFibKey(key string) (isFibKey bool, bdName string, fibMac string) { return false, "", "" } -// RestFibKey returns the prefix used in REST to dump vpp fib table entry config. -func RestFibKey() string { - return restFib -} - // XConnectKeyPrefix returns the prefix used in ETCD to store vpp xConnect pair config. func XConnectKeyPrefix() string { return xConnectPrefix @@ -133,8 +108,3 @@ func XConnectKeyPrefix() string { func XConnectKey(rxIface string) string { return xConnectPrefix + rxIface } - -// RestXConnectKey returns the prefix used in REST to dump vpp xConnect pair config. 
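To make the hard-coded key layout concrete, here is a minimal sketch of composing and parsing ETCD keys with the helpers kept by this patch; the "memif1" and "bd1" labels are invented examples, and the import paths are assumed to match the repository layout shown in the diffs:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ligato/vpp-agent/plugins/vpp/model/bfd"
	"github.com/ligato/vpp-agent/plugins/vpp/model/l2"
)

func main() {
	// Compose full keys from the fixed "vpp/config/v1/..." prefixes.
	fmt.Println(bfd.SessionKey("memif1"))  // vpp/config/v1/bfd/session/memif1
	fmt.Println(l2.BridgeDomainKey("bd1")) // vpp/config/v1/bd/bd1

	// Recover the label by trimming the prefix, as key parsers do.
	key := l2.BridgeDomainKey("bd1")
	fmt.Println(strings.TrimPrefix(key, l2.BridgeDomainKeyPrefix())) // bd1
}
```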
-func RestXConnectKey() string { - return restXc -} From bf2eb1cfd709746d152c1ddaa4998874fb598ab4 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 26 Jul 2018 10:49:15 +0200 Subject: [PATCH 056/174] remove methods returning constants (only if, bfd, acl, l2) Signed-off-by: Vladimir Lavor --- clientv1/linux/dbadapter/data_resync_db.go | 2 +- clientv1/vpp/dbadapter/data_resync_db.go | 6 +- cmd/agentctl/cmd/clean_cmd.go | 8 +- cmd/agentctl/utils/common_utils_test.go | 14 +- cmd/agentctl/utils/db_utils.go | 16 +- cmd/vpp-agent-ctl/data_cmd.go | 4 +- plugins/rest/README.md | 36 ++-- plugins/rest/plugin_impl_rest.go | 28 ++-- plugins/rest/rest_handlers.go | 38 ++--- plugins/rest/resturl/rest_url.go | 67 ++++++++ plugins/rest/url/rest_url.go | 157 ------------------ plugins/vpp/data_change.go | 16 +- plugins/vpp/data_resync.go | 46 ++--- plugins/vpp/error_status.go | 16 +- plugins/vpp/ifplugin/ifaceidx/cache_iface.go | 2 +- plugins/vpp/l2plugin/l2idx/cache_bd.go | 2 +- plugins/vpp/model/acl/keys_agent_acl.go | 11 +- plugins/vpp/model/bfd/keys_agent.go | 27 +-- .../model/interfaces/keys_agent_interfaces.go | 27 +-- plugins/vpp/model/l2/keys_agent_l2.go | 49 ++---- plugins/vpp/watch_events.go | 4 +- 21 files changed, 213 insertions(+), 363 deletions(-) create mode 100644 plugins/rest/resturl/rest_url.go delete mode 100644 plugins/rest/url/rest_url.go diff --git a/clientv1/linux/dbadapter/data_resync_db.go b/clientv1/linux/dbadapter/data_resync_db.go index 877253f6fe..d3f3991463 100644 --- a/clientv1/linux/dbadapter/data_resync_db.go +++ b/clientv1/linux/dbadapter/data_resync_db.go @@ -213,7 +213,7 @@ func (dsl *DataResyncDSL) Send() vppclient.Reply { toBeDeleted := keySet{} // fill all known keys associated with the Linux network configuration: - keys, err := dsl.listKeys(interfaces.InterfaceKeyPrefix()) + keys, err := dsl.listKeys(interfaces.Prefix) if err != nil { break } diff --git a/clientv1/vpp/dbadapter/data_resync_db.go b/clientv1/vpp/dbadapter/data_resync_db.go index 306a8dac46..2548217010 100644 --- a/clientv1/vpp/dbadapter/data_resync_db.go +++ b/clientv1/vpp/dbadapter/data_resync_db.go @@ -245,17 +245,17 @@ func (dsl *DataResyncDSL) Send() vppclient.Reply { // fill all known keys of one VPP: - keys, err := dsl.listKeys(intf.InterfaceKeyPrefix()) + keys, err := dsl.listKeys(intf.Prefix) if err != nil { break } appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l2.BridgeDomainKeyPrefix()) + keys, err = dsl.listKeys(l2.BdPrefix) if err != nil { break } appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l2.XConnectKeyPrefix()) + keys, err = dsl.listKeys(l2.XConnectPrefix) if err != nil { break } diff --git a/cmd/agentctl/cmd/clean_cmd.go b/cmd/agentctl/cmd/clean_cmd.go index da57b01a2f..cde71d860b 100644 --- a/cmd/agentctl/cmd/clean_cmd.go +++ b/cmd/agentctl/cmd/clean_cmd.go @@ -55,9 +55,9 @@ If no data type filter is specified, all data for the specified vpp(s) will be deleted. 
If no [agent-label-filter] argument is specified, data for all agents will be deleted.`, dataTypeFlagName, dataTypeFlagName, - status.StatusPrefix, interfaces.InterfaceKeyPrefix(), - interfaces.InterfaceStateKeyPrefix(), l2.BridgeDomainKeyPrefix(), - l2.XConnectKeyPrefix(), l3.RoutesPrefix), + status.StatusPrefix, interfaces.Prefix, + interfaces.StatePrefix, l2.BdPrefix, + l2.XConnectPrefix, l3.RoutesPrefix), Example: fmt.Sprintf(` Delete all data for "vpp1": $ agentctl clean vpp1 Delete status data for "vpp1"": @@ -66,7 +66,7 @@ for all agents will be deleted.`, $ agentctl clean vpp1 -dataType %s,%s Delete all data for all agents (no filter): $ agentctl clean`, - status.StatusPrefix, status.StatusPrefix, interfaces.InterfaceKeyPrefix()), + status.StatusPrefix, status.StatusPrefix, interfaces.Prefix), Run: cleanFunc, } diff --git a/cmd/agentctl/utils/common_utils_test.go b/cmd/agentctl/utils/common_utils_test.go index a670e68f54..443206b1eb 100644 --- a/cmd/agentctl/utils/common_utils_test.go +++ b/cmd/agentctl/utils/common_utils_test.go @@ -46,7 +46,7 @@ func Test02ParseKeyInterfaceConfig(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/config/v1/interface/{interface-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.InterfaceKeyPrefix())) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.Prefix)) gomega.Expect(params).To(gomega.BeEquivalentTo("{interface-name}")) } @@ -58,7 +58,7 @@ func Test03ParseKeyInterfaceStatus(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/interface/{interface-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.InterfaceStateKeyPrefix())) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.StatePrefix)) gomega.Expect(params).To(gomega.BeEquivalentTo("{interface-name}")) } @@ -70,7 +70,7 @@ func Test04ParseKeyInterfaceError(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/interface/error/{interface-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.InterfaceErrorPrefix())) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(interfaces.ErrorPrefix)) gomega.Expect(params).To(gomega.BeEquivalentTo("{interface-name}")) } @@ -82,7 +82,7 @@ func Test05ParseKeyBdConfig(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/config/v1/bd/{bd-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BridgeDomainKeyPrefix())) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BdPrefix)) gomega.Expect(params).To(gomega.BeEquivalentTo("{bd-name}")) } @@ -94,7 +94,7 @@ func Test06ParseKeyBdState(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/bd/{bd-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BridgeDomainStateKeyPrefix())) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BdStatePrefix)) gomega.Expect(params).To(gomega.BeEquivalentTo("{bd-name}")) } @@ -106,7 +106,7 @@ func Test07ParseKeyBdError(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/status/v1/bd/error/{bd-name}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BridgeDomainErrorPrefix())) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.BdErrPrefix)) 
gomega.Expect(params).To(gomega.BeEquivalentTo("{bd-name}")) } @@ -118,7 +118,7 @@ func Test08ParseKeyFib(t *testing.T) { ParseKey("/vnf-agent/{agent-label}/vpp/config/v1/bd/{bd-label}/fib/{mac-address}") gomega.Expect(label).To(gomega.BeEquivalentTo("{agent-label}")) - gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.FibKeyPrefix())) + gomega.Expect(dataType).To(gomega.BeEquivalentTo(l2.FibPrefix)) gomega.Expect(params).To(gomega.BeEquivalentTo("{mac-address}")) } diff --git a/cmd/agentctl/utils/db_utils.go b/cmd/agentctl/utils/db_utils.go index 544c14f0f1..dd0504cabc 100644 --- a/cmd/agentctl/utils/db_utils.go +++ b/cmd/agentctl/utils/db_utils.go @@ -190,21 +190,21 @@ func (ed EtcdDump) ReadDataFromDb(db keyval.ProtoBroker, key string, vd = newVppDataRecord() } switch dataType { - case interfaces.InterfaceKeyPrefix(): + case interfaces.Prefix: ed[label], err = readIfConfigFromDb(db, vd, key, params) - case interfaces.InterfaceStateKeyPrefix(): + case interfaces.StatePrefix: ed[label], err = readIfStateFromDb(db, vd, key, params) - case interfaces.InterfaceErrorPrefix(): + case interfaces.ErrorPrefix: ed[label], err = readInterfaceErrorFromDb(db, vd, key, params) - case l2.BridgeDomainKeyPrefix(): + case l2.BdPrefix: ed[label], err = readBdConfigFromDb(db, vd, key, params) - case l2.BridgeDomainStateKeyPrefix(): + case l2.BdStatePrefix: ed[label], err = readBdStateFromDb(db, vd, key, params) - case l2.BridgeDomainErrorPrefix(): + case l2.BdErrPrefix: ed[label], err = readBdErrorFromDb(db, vd, key, params) - case l2.FibKeyPrefix(): + case l2.FibPrefix: ed[label], err = readFibFromDb(db, vd, key) - case l2.XConnectKeyPrefix(): + case l2.XConnectPrefix: ed[label], err = readXconnectFromDb(db, vd, key, params) case l3.RoutesPrefix: ed[label], err = readRoutesFromDb(db, vd, key) diff --git a/cmd/vpp-agent-ctl/data_cmd.go b/cmd/vpp-agent-ctl/data_cmd.go index 200514fdd8..81899dbc5f 100644 --- a/cmd/vpp-agent-ctl/data_cmd.go +++ b/cmd/vpp-agent-ctl/data_cmd.go @@ -1274,7 +1274,7 @@ func (ctl *VppAgentCtl) deleteTxn() { // ReportIfaceErrorState reports interface status data to the ETCD func (ctl *VppAgentCtl) reportIfaceErrorState() { - ifErr, err := ctl.broker.ListValues(interfaces.InterfaceErrorPrefix()) + ifErr, err := ctl.broker.ListValues(interfaces.ErrorPrefix) if err != nil { ctl.Log.Fatal(err) return @@ -1296,7 +1296,7 @@ func (ctl *VppAgentCtl) reportIfaceErrorState() { // ReportBdErrorState reports bridge domain status data to the ETCD func (ctl *VppAgentCtl) reportBdErrorState() { - bdErr, err := ctl.broker.ListValues(l2.BridgeDomainErrorPrefix()) + bdErr, err := ctl.broker.ListValues(l2.BdErrPrefix) if err != nil { ctl.Log.Fatal(err) return diff --git a/plugins/rest/README.md b/plugins/rest/README.md index 76ef02298d..32c7d2283c 100644 --- a/plugins/rest/README.md +++ b/plugins/rest/README.md @@ -21,15 +21,15 @@ interfaces or ACLs, internal names, etc.). Those data are in separate section la URLs to obtain ACL IP/MACIP configuration are as follows. ``` -curl http://0.0.0.0:9191/v1/acl/ip -curl http://0.0.0.0:9191/v1/acl/macip +curl http://0.0.0.0:9191/vpp/v1/acl/ip +curl http://0.0.0.0:9191/vpp/v1/acl/macip ``` It is also possible to obtain ACL example configuration (not configured on the VPP). 
``` -curl http://0.0.0.0:9191/v1/acl/ip/example -curl http://0.0.0.0:9191/v1/acl/macip/example +curl http://0.0.0.0:9191/vpp/v1/acl/ip/example +curl http://0.0.0.0:9191/vpp/v1/acl/macip/example ``` **Interfaces** @@ -38,13 +38,13 @@ REST plugin exposes configured interfaces, which can be shown all together, or only those of a specific type. ``` -curl http://0.0.0.0:9191/v1/interfaces -curl http://0.0.0.0:9191/v1/interfaces/loopback -curl http://0.0.0.0:9191/v1/interfaces/ethernet -curl http://0.0.0.0:9191/v1/interfaces/memif -curl http://0.0.0.0:9191/v1/interfaces/tap -curl http://0.0.0.0:9191/v1/interfaces/vxlan -curl http://0.0.0.0:9191/v1/interfaces/afpacket +curl http://0.0.0.0:9191/vpp/v1/interfaces +curl http://0.0.0.0:9191/vpp/v1/interfaces/loopback +curl http://0.0.0.0:9191/vpp/v1/interfaces/ethernet +curl http://0.0.0.0:9191/vpp/v1/interfaces/memif +curl http://0.0.0.0:9191/vpp/v1/interfaces/tap +curl http://0.0.0.0:9191/vpp/v1/interfaces/vxlan +curl http://0.0.0.0:9191/vpp/v1/interfaces/afpacket ``` **BFD** @@ -53,9 +53,9 @@ REST plugin allows dumping bidirectional forwarding detection sessions, authentication keys, or the whole configuration. ``` -curl http://0.0.0.0:9191/v1/bfd -curl http://0.0.0.0:9191/v1/bfd/sessions -curl http://0.0.0.0:9191/v1/bfd/authkeys +curl http://0.0.0.0:9191/vpp/v1/bfd +curl http://0.0.0.0:9191/vpp/v1/bfd/sessions +curl http://0.0.0.0:9191/vpp/v1/bfd/authkeys ``` **L2 plugin** @@ -64,10 +64,10 @@ Support for bridge domains, FIBs and cross connects. It is also possible to get the bridge domain IDs. ``` -curl http://0.0.0.0:9191/v1/bdid -curl http://0.0.0.0:9191/v1/bd -curl http://0.0.0.0:9191/v1/fibs -curl http://0.0.0.0:9191/v1/xc +curl http://0.0.0.0:9191/vpp/v1/bdid +curl http://0.0.0.0:9191/vpp/v1/bd +curl http://0.0.0.0:9191/vpp/v1/fibs +curl http://0.0.0.0:9191/vpp/v1/xc ``` **L3 plugin** diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index c622dd8cea..6af1234cb9 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -22,11 +22,11 @@ import ( "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/plugins/govppmux" - "github.com/ligato/vpp-agent/plugins/rest/url" "github.com/ligato/vpp-agent/plugins/vpp" aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/rest/resturl" ) const ( @@ -109,19 +109,19 @@ func (plugin *Plugin) Init() (err error) { } plugin.indexItems = []indexItem{ - {Name: "ACL IP", Path: url.RestIPKey()}, - {Name: "ACL MACIP", Path: url.RestMACIPKey()}, - {Name: "Interfaces", Path: url.RestInterfaceKey()}, - {Name: "Loopback interfaces", Path: url.RestLoopbackKey()}, - {Name: "Ethernet interfaces", Path: url.RestEthernetKey()}, - {Name: "Memif interfaces", Path: url.RestMemifKey()}, - {Name: "Tap interfaces", Path: url.RestTapKey()}, - {Name: "VxLAN interfaces", Path: url.RestVxLanKey()}, - {Name: "Af-packet nterfaces", Path: url.RestAfPAcketKey()}, - {Name: "Bridge domains", Path: url.RestBridgeDomainKey()}, - {Name: "Bridge domain IDs", Path: url.RestBridgeDomainIDKey()}, - {Name: "L2Fibs", Path: url.RestFibKey()}, - {Name: "XConnectorPairs", Path: url.RestXConnectKey()}, + {Name: "ACL IP", Path: resturl.AclIP}, + {Name: "ACL MACIP", Path: resturl.AclMACIP}, + {Name: "Interfaces", Path: resturl.Interface}, + {Name: "Loopback 
interfaces", Path: resturl.Loopback}, + {Name: "Ethernet interfaces", Path: resturl.Ethernet}, + {Name: "Memif interfaces", Path: resturl.Memif}, + {Name: "Tap interfaces", Path: resturl.Tap}, + {Name: "VxLAN interfaces", Path: resturl.VxLan}, + {Name: "Af-packet interfaces", Path: resturl.AfPacket}, + {Name: "Bridge domains", Path: resturl.Bd}, + {Name: "Bridge domain IDs", Path: resturl.BdId}, + {Name: "L2Fibs", Path: resturl.Fib}, + {Name: "XConnectorPairs", Path: resturl.Xc}, {Name: "ARPs", Path: "/arps"}, {Name: "Static routes", Path: "/staticroutes"}, diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 0418730c2f..0d01198789 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -29,27 +29,27 @@ import ( "github.com/ligato/vpp-agent/plugins/govppmux/vppcalls" "github.com/unrolled/render" - "github.com/ligato/vpp-agent/plugins/rest/url" aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" + "github.com/ligato/vpp-agent/plugins/rest/resturl" ) // Registers access list REST handlers func (plugin *Plugin) registerAccessListHandlers() error { // GET IP ACLs - plugin.registerHTTPHandler(url.RestIPKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.AclIP, GET, func() (interface{}, error) { return plugin.aclHandler.DumpIPACL(nil) }) // GET MACIP ACLs - plugin.registerHTTPHandler(url.RestMACIPKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.AclMACIP, GET, func() (interface{}, error) { return plugin.aclHandler.DumpMacIPAcls() }) // GET IP ACL example - plugin.HTTPHandlers.RegisterHTTPHandler(url.RestIPExampleKey(), plugin.exampleIpACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(resturl.AclIPExample, plugin.exampleIpACLGetHandler, GET) // GET MACIP ACL example - plugin.HTTPHandlers.RegisterHTTPHandler(url.RestMACIPExampleKey(), plugin.exampleMacIpACLGetHandler, GET) + plugin.HTTPHandlers.RegisterHTTPHandler(resturl.AclMACIPExample, plugin.exampleMacIpACLGetHandler, GET) return nil } @@ -57,11 +57,11 @@ func (plugin *Plugin) registerAccessListHandlers() error { // Registers interface REST handlers func (plugin *Plugin) registerInterfaceHandlers() error { // GET all interfaces - plugin.registerHTTPHandler(url.RestInterfaceKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Interface, GET, func() (interface{}, error) { return plugin.ifHandler.DumpInterfaces() }) // GET loopback interfaces - plugin.registerHTTPHandler(url.RestLoopbackKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Loopback, GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_SOFTWARE_LOOPBACK { @@ -71,7 +71,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET ethernet interfaces - plugin.registerHTTPHandler(url.RestEthernetKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Ethernet, GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_ETHERNET_CSMACD { @@ -81,7 +81,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET memif 
interfaces - plugin.registerHTTPHandler(url.RestMemifKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Memif, GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_MEMORY_INTERFACE { @@ -91,7 +91,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET tap interfaces - plugin.registerHTTPHandler(url.RestTapKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Tap, GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_TAP_INTERFACE { @@ -101,7 +101,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET af-packet interfaces - plugin.registerHTTPHandler(url.RestAfPAcketKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.AfPacket, GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_AF_PACKET_INTERFACE { @@ -111,7 +111,7 @@ func (plugin *Plugin) registerInterfaceHandlers() error { return ifs, err }) // GET VxLAN interfaces - plugin.registerHTTPHandler(url.RestVxLanKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.VxLan, GET, func() (interface{}, error) { ifs, err := plugin.ifHandler.DumpInterfaces() for ifKey, ifConfig := range ifs { if ifConfig.Interface.Type != interfaces.InterfaceType_VXLAN_TUNNEL { @@ -126,15 +126,15 @@ func (plugin *Plugin) registerInterfaceHandlers() error { func (plugin *Plugin) registerBfdHandlers() error { // GET BFD configuration - plugin.registerHTTPHandler(url.RestBfdKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.BfdKey, GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSingleHop() }) // GET BFD sessions - plugin.registerHTTPHandler(url.RestSessionKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.BfdSessionKey, GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSessions() }) // GET BFD authentication keys - plugin.registerHTTPHandler(url.RestAuthKeysKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.BfdAuthKey, GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdAuthKeys() }) @@ -144,19 +144,19 @@ func (plugin *Plugin) registerBfdHandlers() error { // Registers L2 plugin REST handlers func (plugin *Plugin) registerL2Handlers() error { // GET bridge domain IDs - plugin.registerHTTPHandler(url.RestBridgeDomainIDKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.BdId, GET, func() (interface{}, error) { return plugin.bdHandler.DumpBridgeDomainIDs() }) // GET bridge domains - plugin.registerHTTPHandler(url.RestBridgeDomainKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Bd, GET, func() (interface{}, error) { return plugin.bdHandler.DumpBridgeDomains() }) // GET FIB entries - plugin.registerHTTPHandler(url.RestFibKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Fib, GET, func() (interface{}, error) { return plugin.fibHandler.DumpFIBTableEntries() }) // GET cross connects - plugin.registerHTTPHandler(url.RestXConnectKey(), GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.Xc, GET, func() (interface{}, error) { 
return plugin.xcHandler.DumpXConnectPairs() }) diff --git a/plugins/rest/resturl/rest_url.go b/plugins/rest/resturl/rest_url.go new file mode 100644 index 0000000000..7fc399eea3 --- /dev/null +++ b/plugins/rest/resturl/rest_url.go @@ -0,0 +1,67 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resturl + +// Access list REST keys +const ( + // REST Acl IP prefix + AclIP = "/vpp/v1/acl/ip" + // REST Acl IP example prefix + AclIPExample = "/vpp/v1/acl/ip/example" + // REST Acl MACIP prefix + AclMACIP = "/vpp/v1/acl/macip" + // REST Acl MACIP example prefix + AclMACIPExample = "/vpp/v1/acl/macip/example" +) + +// BFD REST keys +const ( + // restBfdKey is a REST path of a bfd + BfdKey = "/vpp/v1/bfd" + // restBfdSessionKey is a REST path of a bfd sessions + BfdSessionKey = "/vpp/v1/bfd/sessions" + // restBfdAuthKey is a REST path of a bfd authentication keys + BfdAuthKey = "/vpp/v1/bfd/authkeys" +) + +// Interface REST keys +const ( + // restInterface is rest interface path + Interface = "/vpp/v1/interfaces" + // restLoopback is path for loopback interface + Loopback = "/vpp/v1/interfaces/loopback" + // restLoopback is path for physical interface + Ethernet = "/vpp/v1/interfaces/ethernet" + // restLoopback is path for memif interface + Memif = "/vpp/v1/interfaces/memif" + // restLoopback is path for tap interface + Tap = "/vpp/v1/interfaces/tap" + // restAfPacket is path for af-packet interface + AfPacket = "/vpp/v1/interfaces/afpacket" + // restLoopback is path for vxlan interface + VxLan = "/vpp/v1/interfaces/vxlan" +) + +// L2 plugin +const ( + // restBd is rest bridge domain path + Bd = "/vpp/v1/bd" + // restBdId is rest bridge domain ID path + BdId = "/vpp/v1/bdid" + // restFib is rest FIB path + Fib = "/vpp/v1/fib" + // restXc is rest cross-connect path + Xc = "/vpp/v1/xc" +) diff --git a/plugins/rest/url/rest_url.go b/plugins/rest/url/rest_url.go deleted file mode 100644 index 4e7fc3d126..0000000000 --- a/plugins/rest/url/rest_url.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
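As a quick orientation to the new `resturl` package, here is a hypothetical client-side sketch that queries a few of the endpoints defined above; the host and port are taken from the README examples, and everything else is illustrative rather than part of the patch:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/ligato/vpp-agent/plugins/rest/resturl"
)

func main() {
	base := "http://0.0.0.0:9191" // default REST endpoint from the README
	for _, path := range []string{resturl.AclIP, resturl.Bd, resturl.Fib} {
		// Each constant is a ready-to-use URL path, no getter call needed.
		resp, err := http.Get(base + path)
		if err != nil {
			fmt.Println(err)
			continue
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%s -> %d bytes\n", path, len(body))
	}
}
```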
- -package url - -// Access list REST keys -const ( - // REST Acl IP prefix - restAclIP = "/v1/acl/ip" - // REST Acl IP example prefix - restAclIPExample = "/v1/acl/ip/example" - // REST Acl MACIP prefix - restAclMACIP = "/v1/acl/macip" - // REST Acl MACIP example prefix - restAclMACIPExample = "/v1/acl/macip/example" -) - -// RestIPKey returns prefix used in REST to dump ACL IP config -func RestIPKey() string { - return restAclIP -} - -// RestIPExampleKey returns prefix used in REST to dump ACL IP example config -func RestIPExampleKey() string { - return restAclIPExample -} - -// RestMACIPKey returns prefix used in REST to dump ACL MACIP config -func RestMACIPKey() string { - return restAclMACIP -} - -// RestMACIPExampleKey returns prefix used in REST to dump ACL MACIP example config -func RestMACIPExampleKey() string { - return restAclMACIPExample -} - -// BFD REST keys -const ( - // restBfdKey is a REST path of a bfd - restBfdKey = "/v1/bfd" - // restBfdSessionKey is a REST path of a bfd sessions - restBfdSessionKey = "/v1/bfd/sessions" - // restBfdAuthKey is a REST path of a bfd authentication keys - restBfdAuthKey = "/v1/bfd/authkeys" -) - -// RestBfdKey returns prefix used in REST to dump bfd config -func RestBfdKey() string { - return restBfdKey -} - -// RestSessionKey returns prefix used in REST to dump bfd session config -func RestSessionKey() string { - return restBfdSessionKey -} - -// RestAuthKeysKey returns prefix used in REST to dump bfd authentication config -func RestAuthKeysKey() string { - return restBfdAuthKey -} - -// Interface REST keys -const ( - // restInterface is rest interface path - restInterface = "/v1/interfaces" - // restLoopback is path for loopback interface - restLoopback = "/v1/interfaces/loopback" - // restLoopback is path for physical interface - restEthernet = "/v1/interfaces/ethernet" - // restLoopback is path for memif interface - restMemif = "/v1/interfaces/memif" - // restLoopback is path for tap interface - restTap = "/v1/interfaces/tap" - // restAfPacket is path for af-packet interface - restAfPacket = "/v1/interfaces/afpacket" - // restLoopback is path for vxlan interface - restVxLan = "/v1/interfaces/vxlan" -) - -// RestInterfaceKey returns prefix used in REST to dump interface config -func RestInterfaceKey() string { - return restInterface -} - -// RestLoopbackKey returns prefix used in REST to dump loopback interface config -func RestLoopbackKey() string { - return restLoopback -} - -// RestEthernetKey returns prefix used in REST to dump ethernet interface config -func RestEthernetKey() string { - return restEthernet -} - -// RestMemifKey returns prefix used in REST to dump memif interface config -func RestMemifKey() string { - return restMemif -} - -// RestTapKey returns prefix used in REST to dump tap interface config -func RestTapKey() string { - return restTap -} - -// RestAfPAcketKey returns prefix used in REST to dump af-packet interface config -func RestAfPAcketKey() string { - return restAfPacket -} - -// RestVxLanKey returns prefix used in REST to dump VxLAN interface config -func RestVxLanKey() string { - return restVxLan -} - -// L2 plugin -const ( - // restBd is rest bridge domain path - restBd = "/v1/bd" - // restBdId is rest bridge domain ID path - restBdId = "/v1/bdid" - // restFib is rest FIB path - restFib = "/v1/fib" - // restXc is rest cross-connect path - restXc = "/v1/xc" -) - -// RestBridgeDomainKey returns the key used in REST to dump bridge domains. 
-func RestBridgeDomainKey() string { - return restBd -} - -// RestBridgeDomainIDKey returns the key used in REST to dump bridge domain IDs. -func RestBridgeDomainIDKey() string { - return restBdId -} - -// RestFibKey returns the prefix used in REST to dump vpp fib table entry config. -func RestFibKey() string { - return restFib -} - -// RestXConnectKey returns the prefix used in REST to dump vpp xConnect pair config. -func RestXConnectKey() string { - return restXc -} diff --git a/plugins/vpp/data_change.go b/plugins/vpp/data_change.go index a972cceff5..a2acc371d6 100644 --- a/plugins/vpp/data_change.go +++ b/plugins/vpp/data_change.go @@ -35,11 +35,11 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call key := dataChng.GetKey() // Skip potential changes on error keys - if strings.HasPrefix(key, interfaces.InterfaceErrorPrefix()) || strings.HasPrefix(key, l2.BridgeDomainErrorPrefix()) { + if strings.HasPrefix(key, interfaces.ErrorPrefix) || strings.HasPrefix(key, l2.BdErrPrefix) { return false, nil } plugin.Log.Debug("Start processing change for key: ", key) - if strings.HasPrefix(key, acl.KeyPrefix()) { + if strings.HasPrefix(key, acl.Prefix) { var value, prevValue acl.AccessLists_Acl if err := dataChng.GetValue(&value); err != nil { return false, err @@ -51,7 +51,7 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call } else { return false, err } - } else if strings.HasPrefix(key, interfaces.InterfaceKeyPrefix()) { + } else if strings.HasPrefix(key, interfaces.Prefix) { var value, prevValue interfaces.Interfaces_Interface if err := dataChng.GetValue(&value); err != nil { return false, err @@ -63,7 +63,7 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call } else { return false, err } - } else if strings.HasPrefix(key, bfd.SessionKeyPrefix()) { + } else if strings.HasPrefix(key, bfd.SessionPrefix) { var value, prevValue bfd.SingleHopBFD_Session if err := dataChng.GetValue(&value); err != nil { return false, err @@ -75,7 +75,7 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call } else { return false, err } - } else if strings.HasPrefix(key, bfd.AuthKeysKeyPrefix()) { + } else if strings.HasPrefix(key, bfd.AuthKeysPrefix) { var value, prevValue bfd.SingleHopBFD_Key if err := dataChng.GetValue(&value); err != nil { return false, err @@ -87,7 +87,7 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call } else { return false, err } - } else if strings.HasPrefix(key, bfd.EchoFunctionKeyPrefix()) { + } else if strings.HasPrefix(key, bfd.EchoFunctionPrefix) { var value, prevValue bfd.SingleHopBFD_EchoFunction if err := dataChng.GetValue(&value); err != nil { return false, err @@ -99,7 +99,7 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call } else { return false, err } - } else if strings.HasPrefix(key, l2.BridgeDomainKeyPrefix()) { + } else if strings.HasPrefix(key, l2.BdPrefix) { fib, _, _ := l2.ParseFibKey(key) if fib { // L2 FIB entry @@ -128,7 +128,7 @@ func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent, call return false, err } } - } else if strings.HasPrefix(key, l2.XConnectKeyPrefix()) { + } else if strings.HasPrefix(key, l2.XConnectPrefix) { var value, prevValue l2.XConnectPairs_XConnectPair if err := dataChng.GetValue(&value); err != nil { return false, err diff --git a/plugins/vpp/data_resync.go b/plugins/vpp/data_resync.go index e056517254..11edcc4b2c 100644 --- 
a/plugins/vpp/data_resync.go +++ b/plugins/vpp/data_resync.go @@ -159,42 +159,42 @@ func (plugin *Plugin) resyncConfig(req *DataResyncReq) error { // store all resync errors var resyncErrs []error - if !plugin.droppedFromResync(interfaces.InterfaceKeyPrefix()) { + if !plugin.droppedFromResync(interfaces.Prefix) { if errs := plugin.ifConfigurator.Resync(req.Interfaces); errs != nil { resyncErrs = append(resyncErrs, errs...) } } - if !plugin.droppedFromResync(acl.KeyPrefix()) { + if !plugin.droppedFromResync(acl.Prefix) { if err := plugin.aclConfigurator.Resync(req.ACLs); err != nil { resyncErrs = append(resyncErrs, err) } } - if !plugin.droppedFromResync(bfd.AuthKeysKeyPrefix()) { + if !plugin.droppedFromResync(bfd.AuthKeysPrefix) { if err := plugin.bfdConfigurator.ResyncAuthKey(req.SingleHopBFDKey); err != nil { resyncErrs = append(resyncErrs, err) } } - if !plugin.droppedFromResync(bfd.SessionKeyPrefix()) { + if !plugin.droppedFromResync(bfd.SessionPrefix) { if err := plugin.bfdConfigurator.ResyncSession(req.SingleHopBFDSession); err != nil { resyncErrs = append(resyncErrs, err) } } - if !plugin.droppedFromResync(bfd.EchoFunctionKeyPrefix()) { + if !plugin.droppedFromResync(bfd.EchoFunctionPrefix) { if err := plugin.bfdConfigurator.ResyncEchoFunction(req.SingleHopBFDEcho); err != nil { resyncErrs = append(resyncErrs, err) } } - if !plugin.droppedFromResync(l2.BridgeDomainKeyPrefix()) { + if !plugin.droppedFromResync(l2.BdPrefix) { if err := plugin.bdConfigurator.Resync(req.BridgeDomains); err != nil { resyncErrs = append(resyncErrs, err) } } - if !plugin.droppedFromResync(l2.FibKeyPrefix()) { + if !plugin.droppedFromResync(l2.FibPrefix) { if err := plugin.fibConfigurator.Resync(req.FibTableEntries); err != nil { resyncErrs = append(resyncErrs, err) } } - if !plugin.droppedFromResync(l2.XConnectKeyPrefix()) { + if !plugin.droppedFromResync(l2.XConnectPrefix) { if err := plugin.xcConfigurator.Resync(req.XConnects); err != nil { resyncErrs = append(resyncErrs, err) } @@ -278,26 +278,26 @@ func (plugin *Plugin) resyncParseEvent(resyncEv datasync.ResyncEvent) *DataResyn if plugin.droppedFromResync(key) { continue } - if strings.HasPrefix(key, acl.KeyPrefix()) { + if strings.HasPrefix(key, acl.Prefix) { numAcls := appendACLInterface(resyncData, req) plugin.Log.Debug("Received RESYNC ACL values ", numAcls) - } else if strings.HasPrefix(key, interfaces.InterfaceKeyPrefix()) { + } else if strings.HasPrefix(key, interfaces.Prefix) { numInterfaces := appendResyncInterface(resyncData, req) plugin.Log.Debug("Received RESYNC interface values ", numInterfaces) - } else if strings.HasPrefix(key, bfd.SessionKeyPrefix()) { + } else if strings.HasPrefix(key, bfd.SessionPrefix) { numBfdSession := resyncAppendBfdSession(resyncData, req) plugin.Log.Debug("Received RESYNC BFD Session values ", numBfdSession) - } else if strings.HasPrefix(key, bfd.AuthKeysKeyPrefix()) { + } else if strings.HasPrefix(key, bfd.AuthKeysPrefix) { numBfdAuthKeys := resyncAppendBfdAuthKeys(resyncData, req) plugin.Log.Debug("Received RESYNC BFD Auth Key values ", numBfdAuthKeys) - } else if strings.HasPrefix(key, bfd.EchoFunctionKeyPrefix()) { + } else if strings.HasPrefix(key, bfd.EchoFunctionPrefix) { numBfdEchos := resyncAppendBfdEcho(resyncData, req) plugin.Log.Debug("Received RESYNC BFD Echo values ", numBfdEchos) - } else if strings.HasPrefix(key, l2.BridgeDomainKeyPrefix()) { + } else if strings.HasPrefix(key, l2.BdPrefix) { numBDs, numL2FIBs := resyncAppendBDs(resyncData, req) plugin.Log.Debug("Received RESYNC BD values ", 
numBDs) plugin.Log.Debug("Received RESYNC L2 FIB values ", numL2FIBs) - } else if strings.HasPrefix(key, l2.XConnectKeyPrefix()) { + } else if strings.HasPrefix(key, l2.XConnectPrefix) { numXCons := resyncAppendXCons(resyncData, req) plugin.Log.Debug("Received RESYNC XConnects values ", numXCons) } else if strings.HasPrefix(key, l3.VrfKeyPrefix()) { @@ -756,13 +756,13 @@ func (plugin *Plugin) subscribeWatcher() (err error) { plugin.watchConfigReg, err = plugin.Watch. Watch("Config VPP default plug:IF/L2/L3", plugin.changeChan, plugin.resyncConfigChan, - acl.KeyPrefix(), - interfaces.InterfaceKeyPrefix(), - bfd.SessionKeyPrefix(), - bfd.AuthKeysKeyPrefix(), - bfd.EchoFunctionKeyPrefix(), - l2.BridgeDomainKeyPrefix(), - l2.XConnectKeyPrefix(), + acl.Prefix, + interfaces.Prefix, + bfd.SessionPrefix, + bfd.AuthKeysPrefix, + bfd.EchoFunctionPrefix, + l2.BdPrefix, + l2.XConnectPrefix, l3.VrfKeyPrefix(), l3.ArpKeyPrefix(), l3.ProxyArpInterfacePrefix(), @@ -782,7 +782,7 @@ func (plugin *Plugin) subscribeWatcher() (err error) { plugin.watchStatusReg, err = plugin.Watch. Watch("Status VPP default plug:IF/L2/L3", nil, plugin.resyncStatusChan, - interfaces.InterfaceStateKeyPrefix(), l2.BridgeDomainStateKeyPrefix()) + interfaces.StatePrefix, l2.BdStatePrefix) if err != nil { return err } diff --git a/plugins/vpp/error_status.go b/plugins/vpp/error_status.go index bca4792348..03de2eac1b 100644 --- a/plugins/vpp/error_status.go +++ b/plugins/vpp/error_status.go @@ -63,7 +63,7 @@ func (plugin *Plugin) changePropagateError() { // Process provides error data and adds a new entry. func (plugin *Plugin) processError(errInfo error, key string, changeType datasync.PutDel, change datasync.ChangeEvent) { // Interfaces - if strings.HasPrefix(key, interfaces.InterfaceKeyPrefix()) { + if strings.HasPrefix(key, interfaces.Prefix) { var err error var iface, prevIface interfaces.Interfaces_Interface if err := change.GetValue(&iface); err != nil { @@ -92,7 +92,7 @@ func (plugin *Plugin) processError(errInfo error, key string, changeType datasyn plugin.Log.Errorf("Failed to propagate interface error, cause: %v", err) } // Bridge domains - } else if strings.HasPrefix(key, l2.BridgeDomainKeyPrefix()) { + } else if strings.HasPrefix(key, l2.BdPrefix) { var err error var bd, prevBd l2.BridgeDomains_BridgeDomain if err := change.GetValue(&bd); err != nil { @@ -250,11 +250,11 @@ func (plugin *Plugin) removeErrorLog(key string) { return } - if prefix == interfaces.InterfaceKeyPrefix() { + if prefix == interfaces.Prefix { key := interfaces.InterfaceErrorKey(name) plugin.Publish.Put(key, nil) plugin.Log.Infof("Error status log for interface %v cleared", name) - } else if prefix == l2.BridgeDomainKeyPrefix() { + } else if prefix == l2.BdPrefix { key := l2.BridgeDomainErrorKey(name) plugin.Publish.Put(key, nil) plugin.Log.Infof("Error status log for bridge domain %v cleared", name) @@ -269,11 +269,11 @@ func (plugin *Plugin) removeOldestErrorLogEntry(key string) { var name string var metaData interface{} var exists bool - if strings.HasPrefix(key, interfaces.InterfaceErrorPrefix()) { - name = strings.Replace(key, interfaces.InterfaceErrorPrefix(), "", 1) + if strings.HasPrefix(key, interfaces.ErrorPrefix) { + name = strings.Replace(key, interfaces.ErrorPrefix, "", 1) _, metaData, exists = plugin.errorIndexes.LookupIdx(name) - } else if strings.HasPrefix(key, l2.BridgeDomainErrorPrefix()) { - name = strings.Replace(key, l2.BridgeDomainErrorPrefix(), "", 1) + } else if strings.HasPrefix(key, l2.BdErrPrefix) { + name = 
strings.Replace(key, l2.BdErrPrefix, "", 1) _, metaData, exists = plugin.errorIndexes.LookupIdx(name) } if !exists { diff --git a/plugins/vpp/ifplugin/ifaceidx/cache_iface.go b/plugins/vpp/ifplugin/ifaceidx/cache_iface.go index 27fa0e0c76..5cf0e867c4 100644 --- a/plugins/vpp/ifplugin/ifaceidx/cache_iface.go +++ b/plugins/vpp/ifplugin/ifaceidx/cache_iface.go @@ -31,7 +31,7 @@ func Cache(watcher datasync.KeyValProtoWatcher) SwIfIndex { swIdx := NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), resyncName, IndexMetadata)) helper := cacheutil.CacheHelper{ - Prefix: interfaces.InterfaceKeyPrefix(), + Prefix: interfaces.Prefix, IDX: swIdx.GetMapping(), DataPrototype: &interfaces.Interfaces_Interface{Name: "aaa"}, ParseName: interfaces.ParseNameFromKey} diff --git a/plugins/vpp/l2plugin/l2idx/cache_bd.go b/plugins/vpp/l2plugin/l2idx/cache_bd.go index 6ef6ee9ab9..5e1b624182 100644 --- a/plugins/vpp/l2plugin/l2idx/cache_bd.go +++ b/plugins/vpp/l2plugin/l2idx/cache_bd.go @@ -30,7 +30,7 @@ func Cache(watcher datasync.KeyValProtoWatcher) BDIndex { bdIdx := NewBDIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), resyncName, IndexMetadata)) helper := cacheutil.CacheHelper{ - Prefix: l2.BridgeDomainKeyPrefix(), + Prefix: l2.BdPrefix, IDX: bdIdx.GetMapping(), DataPrototype: &l2.BridgeDomains_BridgeDomain{}, ParseName: l2.ParseBDNameFromKey} diff --git a/plugins/vpp/model/acl/keys_agent_acl.go b/plugins/vpp/model/acl/keys_agent_acl.go index 24a8f5d125..02e4d56357 100644 --- a/plugins/vpp/model/acl/keys_agent_acl.go +++ b/plugins/vpp/model/acl/keys_agent_acl.go @@ -15,17 +15,12 @@ package acl const ( - // DB key prefix - aclPrefix = "vpp/config/v1/acl/" + // ACL key prefix + Prefix = "vpp/config/v1/acl/" ) -// KeyPrefix returns the prefix used in ETCD to store vpp ACLs config. -func KeyPrefix() string { - return aclPrefix -} - // Key returns the prefix used in ETCD to store vpp ACL config // of a particular ACL in selected vpp instance. func Key(aclName string) string { - return aclPrefix + aclName + return Prefix + aclName } diff --git a/plugins/vpp/model/bfd/keys_agent.go b/plugins/vpp/model/bfd/keys_agent.go index 754b7f63ae..00663f46f4 100644 --- a/plugins/vpp/model/bfd/keys_agent.go +++ b/plugins/vpp/model/bfd/keys_agent.go @@ -16,42 +16,27 @@ package bfd const ( // bfdSessionPrefix bfd-session/ - bfdSessionPrefix = "vpp/config/v1/bfd/session/" + SessionPrefix = "vpp/config/v1/bfd/session/" // bfdAuthKeysPrefix bfd-key/ - bfdAuthKeysPrefix = "vpp/config/v1/bfd/auth-key/" + AuthKeysPrefix = "vpp/config/v1/bfd/auth-key/" // BfdEchoFunctionPrefix bfd-echo-function/ - bfdEchoFunctionPrefix = "vpp/config/v1/bfd/echo-function" + EchoFunctionPrefix = "vpp/config/v1/bfd/echo-function" ) -// SessionKeyPrefix returns the prefix used in ETCD to store vpp bfd config. -func SessionKeyPrefix() string { - return bfdSessionPrefix -} - // SessionKey returns the prefix used in ETCD to store vpp bfd config // of a particular bfd session in selected vpp instance. func SessionKey(bfdSessionIfaceLabel string) string { - return bfdSessionPrefix + bfdSessionIfaceLabel -} - -// AuthKeysKeyPrefix returns the prefix used in ETCD to store vpp bfd config. -func AuthKeysKeyPrefix() string { - return bfdAuthKeysPrefix + return SessionPrefix + bfdSessionIfaceLabel } // AuthKeysKey returns the prefix used in ETCD to store vpp bfd config // of a particular bfd key in selected vpp instance. 
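With the getter methods replaced by exported constants, key dispatch reduces to plain prefix matching. Below is a minimal sketch mirroring the `changePropagateRequest` logic shown earlier, assuming the post-patch model packages; the watched key is an invented example:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ligato/vpp-agent/plugins/vpp/model/bfd"
	"github.com/ligato/vpp-agent/plugins/vpp/model/interfaces"
)

// dispatch mirrors the prefix matching used in data_change.go above.
func dispatch(key string) string {
	switch {
	case strings.HasPrefix(key, interfaces.Prefix):
		return "interface change"
	case strings.HasPrefix(key, bfd.SessionPrefix):
		return "bfd session change"
	}
	return "unhandled"
}

func main() {
	fmt.Println(dispatch(interfaces.InterfaceKey("memif1"))) // interface change
}
```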
func AuthKeysKey(bfdKeyIDLabel string) string { - return bfdAuthKeysPrefix + bfdKeyIDLabel -} - -// EchoFunctionKeyPrefix returns the prefix used in ETCD to store vpp bfd config. -func EchoFunctionKeyPrefix() string { - return bfdEchoFunctionPrefix + return AuthKeysPrefix + bfdKeyIDLabel } // EchoFunctionKey returns the prefix used in ETCD to store vpp bfd config // of a particular bfd echo function in selected vpp instance. func EchoFunctionKey(bfdEchoIfaceLabel string) string { - return bfdEchoFunctionPrefix + bfdEchoIfaceLabel + return EchoFunctionPrefix + bfdEchoIfaceLabel } diff --git a/plugins/vpp/model/interfaces/keys_agent_interfaces.go b/plugins/vpp/model/interfaces/keys_agent_interfaces.go index 88fce87b19..b4f100a5f2 100644 --- a/plugins/vpp/model/interfaces/keys_agent_interfaces.go +++ b/plugins/vpp/model/interfaces/keys_agent_interfaces.go @@ -21,18 +21,13 @@ import ( const ( // interfacePrefix is interface prefix - interfacePrefix = "vpp/config/v1/interface/" + Prefix = "vpp/config/v1/interface/" // ifStatePrefix is interface state prefix - ifStatePrefix = "vpp/status/v1/interface/" + StatePrefix = "vpp/status/v1/interface/" // ifErrorPrefix is interface error prefix - ifErrorPrefix = "vpp/status/v1/interface/error/" + ErrorPrefix = "vpp/status/v1/interface/error/" ) -// InterfaceKeyPrefix returns the prefix used in ETCD to store vpp interfaces config. -func InterfaceKeyPrefix() string { - return interfacePrefix -} - // ParseNameFromKey returns suffix of the key. func ParseNameFromKey(key string) (name string, err error) { lastSlashPos := strings.LastIndex(key, "/") @@ -46,26 +41,16 @@ func ParseNameFromKey(key string) (name string, err error) { // InterfaceKey returns the prefix used in ETCD to store the vpp interface config // of a particular interface in selected vpp instance. func InterfaceKey(ifaceLabel string) string { - return interfacePrefix + ifaceLabel -} - -// InterfaceErrorPrefix returns the prefix used in ETCD to store the interface errors. -func InterfaceErrorPrefix() string { - return ifErrorPrefix + return Prefix + ifaceLabel } // InterfaceErrorKey returns the key used in ETCD to store the interface errors. func InterfaceErrorKey(ifaceLabel string) string { - return ifErrorPrefix + ifaceLabel -} - -// InterfaceStateKeyPrefix returns the prefix used in ETCD to store the vpp interfaces state data. -func InterfaceStateKeyPrefix() string { - return ifStatePrefix + return ErrorPrefix + ifaceLabel } // InterfaceStateKey returns the prefix used in ETCD to store the vpp interface state data // of particular interface in selected vpp instance. func InterfaceStateKey(ifaceLabel string) string { - return ifStatePrefix + ifaceLabel + return StatePrefix + ifaceLabel } diff --git a/plugins/vpp/model/l2/keys_agent_l2.go b/plugins/vpp/model/l2/keys_agent_l2.go index 678da93287..a353686292 100644 --- a/plugins/vpp/model/l2/keys_agent_l2.go +++ b/plugins/vpp/model/l2/keys_agent_l2.go @@ -22,47 +22,32 @@ import ( // Prefixes const ( // bdPrefix is the relative key prefix for bridge domains. - bdPrefix = "vpp/config/v1/bd/" + BdPrefix = "vpp/config/v1/bd/" // bdStatePrefix is the relative key prefix for bridge domain state. - bdStatePrefix = "vpp/status/v1/bd/" + BdStatePrefix = "vpp/status/v1/bd/" // bdErrPrefix is the relative key prefix for the bridge domain error. - bdErrPrefix = "vpp/status/v1/bd/error/" + BdErrPrefix = "vpp/status/v1/bd/error/" // fibPrefix is the relative key prefix for FIB table entries. 
- fibPrefix = "vpp/config/v1/bd/{bd}/fib/" + FibPrefix = "vpp/config/v1/bd/{bd}/fib/" // xConnectPrefix is the relative key prefix for xconnects. - xConnectPrefix = "vpp/config/v1/xconnect/" + XConnectPrefix = "vpp/config/v1/xconnect/" ) -// BridgeDomainKeyPrefix returns the prefix used in ETCD to store vpp bridge domain config. -func BridgeDomainKeyPrefix() string { - return bdPrefix -} - // BridgeDomainKey returns the prefix used in ETCD to store vpp bridge domain config // of a particular bridge domain in selected vpp instance. func BridgeDomainKey(bdName string) string { - return bdPrefix + bdName -} - -// BridgeDomainStateKeyPrefix returns the prefix used in ETCD to store vpp bridge domain state data. -func BridgeDomainStateKeyPrefix() string { - return bdStatePrefix + return BdPrefix + bdName } // BridgeDomainStateKey returns the prefix used in ETCD to store vpp bridge domain state data // of a particular bridge domain in selected vpp instance. func BridgeDomainStateKey(ifaceLabel string) string { - return bdStatePrefix + ifaceLabel -} - -// BridgeDomainErrorPrefix returns the prefix used in ETCD to store bridge domain errors. -func BridgeDomainErrorPrefix() string { - return bdErrPrefix + return BdStatePrefix + ifaceLabel } // BridgeDomainErrorKey returns the key used in ETCD to store bridge domain errors. func BridgeDomainErrorKey(bdLabel string) string { - return bdErrPrefix + bdLabel + return BdErrPrefix + bdLabel } // ParseBDNameFromKey returns suffix of the key. @@ -75,21 +60,16 @@ func ParseBDNameFromKey(key string) (name string, err error) { return key, fmt.Errorf("wrong format of the key %s", key) } -// FibKeyPrefix returns the prefix used in ETCD to store vpp fib table entry config. -func FibKeyPrefix() string { - return fibPrefix -} - // FibKey returns the prefix used in ETCD to store vpp fib table entry config // of a particular fib in selected vpp instance. func FibKey(bdLabel string, fibMac string) string { - return strings.Replace(fibPrefix, "{bd}", bdLabel, 1) + fibMac + return strings.Replace(FibPrefix, "{bd}", bdLabel, 1) + fibMac } // ParseFibKey parses bridge domain label and FIB MAC address from a FIB key. func ParseFibKey(key string) (isFibKey bool, bdName string, fibMac string) { - if strings.HasPrefix(key, BridgeDomainKeyPrefix()) { - bdSuffix := strings.TrimPrefix(key, BridgeDomainKeyPrefix()) + if strings.HasPrefix(key, BdPrefix) { + bdSuffix := strings.TrimPrefix(key, BdPrefix) fibComps := strings.Split(bdSuffix, "/") if len(fibComps) == 3 && fibComps[1] == "fib" { return true, fibComps[0], fibComps[2] @@ -98,13 +78,8 @@ func ParseFibKey(key string) (isFibKey bool, bdName string, fibMac string) { return false, "", "" } -// XConnectKeyPrefix returns the prefix used in ETCD to store vpp xConnect pair config. -func XConnectKeyPrefix() string { - return xConnectPrefix -} - // XConnectKey returns the prefix used in ETCD to store vpp xConnect pair config // of particular xConnect pair in selected vpp instance. 
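The `{bd}` placeholder makes FIB keys the one two-level case in this layout. A short sketch, using only the helpers shown above, of how `FibKey` and `ParseFibKey` round-trip; the bridge domain label and MAC address are example values:

```go
package main

import (
	"fmt"

	"github.com/ligato/vpp-agent/plugins/vpp/model/l2"
)

func main() {
	// FibKey substitutes {bd} in the prefix before appending the MAC.
	key := l2.FibKey("bd1", "aa:bb:cc:dd:ee:01")
	fmt.Println(key) // vpp/config/v1/bd/bd1/fib/aa:bb:cc:dd:ee:01

	// ParseFibKey splits the suffix back into its components.
	isFib, bd, mac := l2.ParseFibKey(key)
	fmt.Println(isFib, bd, mac) // true bd1 aa:bb:cc:dd:ee:01
}
```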
func XConnectKey(rxIface string) string { - return xConnectPrefix + rxIface + return XConnectPrefix + rxIface } diff --git a/plugins/vpp/watch_events.go b/plugins/vpp/watch_events.go index 7c4cb57afc..79fbf48eeb 100644 --- a/plugins/vpp/watch_events.go +++ b/plugins/vpp/watch_events.go @@ -98,7 +98,7 @@ func (plugin *Plugin) onStatusResyncEvent(e datasync.ResyncEvent) { var wasError error for key, vals := range e.GetValues() { plugin.Log.Debugf("trying to delete obsolete status for key %v begin ", key) - if strings.HasPrefix(key, interfaces.InterfaceStateKeyPrefix()) { + if strings.HasPrefix(key, interfaces.StatePrefix) { var keys []string for { x, stop := vals.GetNext() @@ -113,7 +113,7 @@ func (plugin *Plugin) onStatusResyncEvent(e datasync.ResyncEvent) { wasError = err } } - } else if strings.HasPrefix(key, l2.BridgeDomainStateKeyPrefix()) { + } else if strings.HasPrefix(key, l2.BdStatePrefix) { var keys []string for { x, stop := vals.GetNext() From 39dd22768f94ee67b69b11f89dd246ce0613fdf6 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 26 Jul 2018 12:47:00 +0200 Subject: [PATCH 057/174] added mutex to http handler registrator Signed-off-by: Vladimir Lavor --- plugins/rest/plugin_impl_rest.go | 22 ++++++-------- plugins/rest/rest_handlers.go | 29 ++++++++----------- plugins/rest/resturl/rest_url.go | 16 +++++----- .../ifplugin/vppcalls/dump_bfd_vppcalls.go | 8 ++--- .../vppcalls/dump_interface_vppcalls.go | 5 ++-- .../vpp/l2plugin/vppcalls/dump_vppcalls.go | 16 +++++----- 6 files changed, 43 insertions(+), 53 deletions(-) diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 6af1234cb9..8c7db23945 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -17,16 +17,18 @@ package rest import ( "fmt" + "sync" + "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/flavors/local" "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/cn-infra/utils/safeclose" "github.com/ligato/vpp-agent/plugins/govppmux" + "github.com/ligato/vpp-agent/plugins/rest/resturl" "github.com/ligato/vpp-agent/plugins/vpp" aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" - "github.com/ligato/vpp-agent/plugins/rest/resturl" ) const ( @@ -55,6 +57,8 @@ type Plugin struct { bdHandler l2vppcalls.BridgeDomainVppRead fibHandler l2vppcalls.FibVppRead xcHandler l2vppcalls.XConnectVppRead + + sync.Mutex } // Deps represents dependencies of Rest Plugin @@ -135,18 +139,10 @@ func (plugin *Plugin) Init() (err error) { func (plugin *Plugin) AfterInit() (err error) { plugin.Log.Debug("REST API Plugin is up and running") - if err := plugin.registerAccessListHandlers(); err != nil { - return err - } - if err := plugin.registerInterfaceHandlers(); err != nil { - return err - } - if err := plugin.registerBfdHandlers(); err != nil { - return err - } - if err := plugin.registerL2Handlers(); err != nil { - return err - } + plugin.registerAccessListHandlers() + plugin.registerInterfaceHandlers() + plugin.registerBfdHandlers() + plugin.registerL2Handlers() plugin.HTTPHandlers.RegisterHTTPHandler("/arps", plugin.arpGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/staticroutes", plugin.staticRoutesGetHandler, "GET") diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 0d01198789..584b3204ff 100644 --- a/plugins/rest/rest_handlers.go +++ 
b/plugins/rest/rest_handlers.go @@ -29,33 +29,31 @@ import ( "github.com/ligato/vpp-agent/plugins/govppmux/vppcalls" "github.com/unrolled/render" + "github.com/ligato/vpp-agent/plugins/rest/resturl" aclcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" l3plugin "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/acl" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/rest/resturl" ) // Registers access list REST handlers -func (plugin *Plugin) registerAccessListHandlers() error { +func (plugin *Plugin) registerAccessListHandlers() { // GET IP ACLs plugin.registerHTTPHandler(resturl.AclIP, GET, func() (interface{}, error) { return plugin.aclHandler.DumpIPACL(nil) }) // GET MACIP ACLs plugin.registerHTTPHandler(resturl.AclMACIP, GET, func() (interface{}, error) { - return plugin.aclHandler.DumpMacIPAcls() + return plugin.aclHandler.DumpMACIPACL(nil) }) // GET IP ACL example plugin.HTTPHandlers.RegisterHTTPHandler(resturl.AclIPExample, plugin.exampleIpACLGetHandler, GET) // GET MACIP ACL example plugin.HTTPHandlers.RegisterHTTPHandler(resturl.AclMACIPExample, plugin.exampleMacIpACLGetHandler, GET) - - return nil } // Registers interface REST handlers -func (plugin *Plugin) registerInterfaceHandlers() error { +func (plugin *Plugin) registerInterfaceHandlers() { // GET all interfaces plugin.registerHTTPHandler(resturl.Interface, GET, func() (interface{}, error) { return plugin.ifHandler.DumpInterfaces() @@ -120,29 +118,25 @@ func (plugin *Plugin) registerInterfaceHandlers() error { } return ifs, err }) - - return nil } -func (plugin *Plugin) registerBfdHandlers() error { +func (plugin *Plugin) registerBfdHandlers() { // GET BFD configuration - plugin.registerHTTPHandler(resturl.BfdKey, GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.BfdUrl, GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSingleHop() }) // GET BFD sessions - plugin.registerHTTPHandler(resturl.BfdSessionKey, GET, func() (interface{}, error) { + plugin.registerHTTPHandler(resturl.BfdSession, GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdSessions() }) // GET BFD authentication keys plugin.registerHTTPHandler(resturl.BfdAuthKey, GET, func() (interface{}, error) { return plugin.bfdHandler.DumpBfdAuthKeys() }) - - return nil } // Registers L2 plugin REST handlers -func (plugin *Plugin) registerL2Handlers() error { +func (plugin *Plugin) registerL2Handlers() { // GET bridge domain IDs plugin.registerHTTPHandler(resturl.BdId, GET, func() (interface{}, error) { return plugin.bdHandler.DumpBridgeDomainIDs() @@ -159,14 +153,15 @@ func (plugin *Plugin) registerL2Handlers() error { plugin.registerHTTPHandler(resturl.Xc, GET, func() (interface{}, error) { return plugin.xcHandler.DumpXConnectPairs() }) - - return nil } // registerHTTPHandler is common register method for all handlers func (plugin *Plugin) registerHTTPHandler(key, method string, f func() (interface{}, error)) { handlerFunc := func(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { + plugin.Lock() + defer plugin.Unlock() + res, err := f() if err != nil { plugin.Deps.Log.Errorf("Error: %v", err) @@ -174,7 +169,7 @@ func (plugin *Plugin) registerHTTPHandler(key, method string, f func() (interfac formatter.JSON(w, http.StatusInternalServerError, err) return } - plugin.Deps.Log.Debug(res) + plugin.Deps.Log.Debugf("Rest uri: %s, data: %v", key, res) 
formatter.JSON(w, http.StatusOK, res) } } diff --git a/plugins/rest/resturl/rest_url.go b/plugins/rest/resturl/rest_url.go index 7fc399eea3..bc1c523672 100644 --- a/plugins/rest/resturl/rest_url.go +++ b/plugins/rest/resturl/rest_url.go @@ -14,7 +14,7 @@ package resturl -// Access list REST keys +// Access list REST urls const ( // REST Acl IP prefix AclIP = "/vpp/v1/acl/ip" @@ -26,17 +26,17 @@ const ( AclMACIPExample = "/vpp/v1/acl/macip/example" ) -// BFD REST keys +// BFD REST urls const ( - // restBfdKey is a REST path of a bfd - BfdKey = "/vpp/v1/bfd" - // restBfdSessionKey is a REST path of a bfd sessions - BfdSessionKey = "/vpp/v1/bfd/sessions" - // restBfdAuthKey is a REST path of a bfd authentication keys + // BfdUrl is a REST path of a bfd + BfdUrl = "/vpp/v1/bfd" + // BfdSession is a REST path of a bfd sessions + BfdSession = "/vpp/v1/bfd/sessions" + // BfdAuthKey is a REST path of a bfd authentication keys BfdAuthKey = "/vpp/v1/bfd/authkeys" ) -// Interface REST keys +// Interface REST urls const ( // restInterface is rest interface path Interface = "/vpp/v1/interfaces" diff --git a/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go index 46495661ce..03e819baa8 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_bfd_vppcalls.go @@ -24,14 +24,14 @@ import ( // BfdDetails is the wrapper structure for the BFD northbound API structure. type BfdDetails struct { - Bfd *bfd.SingleHopBFD - Meta *BfdMeta + Bfd *bfd.SingleHopBFD `json:"bfd"` + Meta *BfdMeta `json:"bfd_meta"` } // BfdMeta is combination of proto-modelled BFD data and VPP provided metadata type BfdMeta struct { - *BfdSessionMeta - *BfdAuthKeyMeta + *BfdSessionMeta `json:"bfd_session_meta"` + *BfdAuthKeyMeta `json:"bfd_authkey_meta"` } func (handler *bfdVppHandler) DumpBfdSingleHop() (*BfdDetails, error) { diff --git a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go index 1d60ea65e6..37097a9d87 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go @@ -37,8 +37,8 @@ const defaultVPPMtu = 9216 // InterfaceDetails is the wrapper structure for the interface northbound API structure. type InterfaceDetails struct { - Interface *ifnb.Interfaces_Interface - Meta *InterfaceMeta + Interface *ifnb.Interfaces_Interface `json:"interface"` + Meta *InterfaceMeta `json:"interface_meta"` } // InterfaceMeta is combination of proto-modelled Interface data and VPP provided metadata @@ -107,7 +107,6 @@ func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*InterfaceDetails, err ifData.Interface.Vrf = vrf } - handler.log.Debugf("dumped %d interfaces", len(ifs)) // SwInterfaceDump time diff --git a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go index 46f4b1ad10..35977c4bd7 100644 --- a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go @@ -27,13 +27,13 @@ import ( // NOTE: Interfaces in BridgeDomains_BridgeDomain is overridden by the local Interfaces member. 
type BridgeDomainDetails struct { Bd *l2nb.BridgeDomains_BridgeDomain `json:"bridge_domain"` - Meta *BridgeDomainMeta + Meta *BridgeDomainMeta `json:"bridge_domain_meta"` } // BridgeDomainMeta contains bridge domain interface name/index map type BridgeDomainMeta struct { - BdID uint32 - BdIfIdxToName map[uint32]string + BdID uint32 `json:"bridge_domain_id"` + BdIfIdxToName map[uint32]string `json:"bridge_domain_id_to_name"` } func (handler *bridgeDomainVppHandler) DumpBridgeDomains() (map[uint32]*BridgeDomainDetails, error) { @@ -125,13 +125,13 @@ func (handler *bridgeDomainVppHandler) DumpBridgeDomainIDs() ([]uint32, error) { // FIBTableDetails is the wrapper structure for the FIB table entry northbound API structure. type FibTableDetails struct { - Fib *l2nb.FibTable_FibEntry - Meta *FibMeta + Fib *l2nb.FibTable_FibEntry `json:"fib"` + Meta *FibMeta `json:"fib_meta"` } // FibMeta contains FIB interface and bridge domain name/index map type FibMeta struct { - BdID uint32 `json:"bridge_domain_idx"` + BdID uint32 `json:"bridge_domain_id"` IfIdx uint32 `json:"outgoing_interface_sw_if_idx"` } @@ -194,8 +194,8 @@ func (handler *fibVppHandler) DumpFIBTableEntries() (map[string]*FibTableDetails // XConnectDetails is the wrapper structure for the l2 xconnect northbound API structure. type XConnectDetails struct { - Xc *l2nb.XConnectPairs_XConnectPair - Meta *XcMeta + Xc *l2nb.XConnectPairs_XConnectPair `json:"x_connect"` + Meta *XcMeta `json:"x_connect_meta"` } // XcMeta contains cross connect rx/tx interface indexes From fe6e2b788c0b1c5eb20ea6397389e740e7cf13c1 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 26 Jul 2018 13:58:08 +0200 Subject: [PATCH 058/174] added "DumpInterfaceByType" to ifplugin api Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 48 +++---------------- plugins/vpp/ifplugin/vppcalls/api_vppcalls.go | 2 + .../vppcalls/dump_interface_vppcalls.go | 16 +++++++ 3 files changed, 24 insertions(+), 42 deletions(-) diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 584b3204ff..885b5804cb 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -60,63 +60,27 @@ func (plugin *Plugin) registerInterfaceHandlers() { }) // GET loopback interfaces plugin.registerHTTPHandler(resturl.Loopback, GET, func() (interface{}, error) { - ifs, err := plugin.ifHandler.DumpInterfaces() - for ifKey, ifConfig := range ifs { - if ifConfig.Interface.Type != interfaces.InterfaceType_SOFTWARE_LOOPBACK { - delete(ifs, ifKey) - } - } - return ifs, err + return plugin.ifHandler.DumpInterfacesByType(interfaces.InterfaceType_SOFTWARE_LOOPBACK) }) // GET ethernet interfaces plugin.registerHTTPHandler(resturl.Ethernet, GET, func() (interface{}, error) { - ifs, err := plugin.ifHandler.DumpInterfaces() - for ifKey, ifConfig := range ifs { - if ifConfig.Interface.Type != interfaces.InterfaceType_ETHERNET_CSMACD { - delete(ifs, ifKey) - } - } - return ifs, err + return plugin.ifHandler.DumpInterfacesByType(interfaces.InterfaceType_ETHERNET_CSMACD) }) // GET memif interfaces plugin.registerHTTPHandler(resturl.Memif, GET, func() (interface{}, error) { - ifs, err := plugin.ifHandler.DumpInterfaces() - for ifKey, ifConfig := range ifs { - if ifConfig.Interface.Type != interfaces.InterfaceType_MEMORY_INTERFACE { - delete(ifs, ifKey) - } - } - return ifs, err + return plugin.ifHandler.DumpInterfacesByType(interfaces.InterfaceType_MEMORY_INTERFACE) }) // GET tap interfaces plugin.registerHTTPHandler(resturl.Tap, GET, func() (interface{}, error) { - 
ifs, err := plugin.ifHandler.DumpInterfaces() - for ifKey, ifConfig := range ifs { - if ifConfig.Interface.Type != interfaces.InterfaceType_TAP_INTERFACE { - delete(ifs, ifKey) - } - } - return ifs, err + return plugin.ifHandler.DumpInterfacesByType(interfaces.InterfaceType_TAP_INTERFACE) }) // GET af-packet interfaces plugin.registerHTTPHandler(resturl.AfPacket, GET, func() (interface{}, error) { - ifs, err := plugin.ifHandler.DumpInterfaces() - for ifKey, ifConfig := range ifs { - if ifConfig.Interface.Type != interfaces.InterfaceType_AF_PACKET_INTERFACE { - delete(ifs, ifKey) - } - } - return ifs, err + return plugin.ifHandler.DumpInterfacesByType(interfaces.InterfaceType_AF_PACKET_INTERFACE) }) // GET VxLAN interfaces plugin.registerHTTPHandler(resturl.VxLan, GET, func() (interface{}, error) { - ifs, err := plugin.ifHandler.DumpInterfaces() - for ifKey, ifConfig := range ifs { - if ifConfig.Interface.Type != interfaces.InterfaceType_VXLAN_TUNNEL { - delete(ifs, ifKey) - } - } - return ifs, err + return plugin.ifHandler.DumpInterfacesByType(interfaces.InterfaceType_VXLAN_TUNNEL) }) } diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go index 5988bf6b51..0712d360bb 100644 --- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -106,6 +106,8 @@ type IfVppRead interface { // - there is no af_packet dump binary API. We relay on naming conventions of the internal VPP interface names // - ip.IPAddressDetails has wrong internal structure, as a workaround we need to handle them as notifications DumpInterfaces() (map[uint32]*InterfaceDetails, error) + // DumpInterfacesByType returns all VPP interfaces of the specified type + DumpInterfacesByType(reqType interfaces.InterfaceType) (map[uint32]*InterfaceDetails, error) // GetInterfaceVRF assigns VRF table to interface GetInterfaceVRF(ifIdx uint32) (vrfID uint32, err error) // DumpMemifSocketDetails dumps memif socket details from the VPP diff --git a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go index 37097a9d87..82b5691405 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_interface_vppcalls.go @@ -48,6 +48,22 @@ type InterfaceMeta struct { InternalName string `json:"internal_name"` } +func (handler *ifVppHandler) DumpInterfacesByType(reqType ifnb.InterfaceType) (map[uint32]*InterfaceDetails, error) { + // Dump all + ifs, err := handler.DumpInterfaces() + if err != nil { + return nil, err + } + // Filter by type + for ifIdx, ifData := range ifs { + if ifData.Interface.Type != reqType { + delete(ifs, ifIdx) + } + } + + return ifs, nil +} + func (handler *ifVppHandler) DumpInterfaces() (map[uint32]*InterfaceDetails, error) { start := time.Now() // map for the resulting interfaces From 54063d7d9cb14a522fda47392710696febf336ff Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Thu, 26 Jul 2018 15:49:59 +0200 Subject: [PATCH 059/174] update keys + remove acl example from rest Signed-off-by: Vladimir Lavor --- plugins/rest/README.md | 39 +++++++---------- plugins/rest/rest_handlers.go | 74 -------------------------------- plugins/rest/resturl/rest_url.go | 36 +++++++--------- 3 files changed, 32 insertions(+), 117 deletions(-) diff --git a/plugins/rest/README.md b/plugins/rest/README.md index 32c7d2283c..4d212c8b0b 100644 --- a/plugins/rest/README.md +++ b/plugins/rest/README.md @@ -21,16 +21,9 @@ interfaces or ACLs, 
internal names, etc.). Those data are in a separate section. URLs to obtain ACL IP/MACIP configuration are as follows. ``` -curl http://0.0.0.0:9191/vpp/v1/acl/ip -curl http://0.0.0.0:9191/vpp/v1/acl/macip +curl http://0.0.0.0:9191/vpp/dump/v1/acl/ip +curl http://0.0.0.0:9191/vpp/dump/v1/acl/macip ``` - -It is also possible to obtain ACL example configuration (not configured on the VPP). - -``` -curl http://0.0.0.0:9191/vpp/v1/acl/ip/example -curl http://0.0.0.0:9191/vpp/v1/acl/macip/example -``` **Interfaces** @@ -38,13 +31,13 @@ REST plugin exposes configured interfaces, which can be shown all together, or only those of a specific type. ``` -curl http://0.0.0.0:9191/vpp/v1/interfaces -curl http://0.0.0.0:9191/vpp/v1/interfaces/loopback -curl http://0.0.0.0:9191/vpp/v1/interfaces/ethernet -curl http://0.0.0.0:9191/vpp/v1/interfaces/memif -curl http://0.0.0.0:9191/vpp/v1/interfaces/tap -curl http://0.0.0.0:9191/vpp/v1/interfaces/vxlan -curl http://0.0.0.0:9191/vpp/v1/interfaces/afpacket +curl http://0.0.0.0:9191/vpp/dump/v1/interfaces +curl http://0.0.0.0:9191/vpp/dump/v1/interfaces/loopback +curl http://0.0.0.0:9191/vpp/dump/v1/interfaces/ethernet +curl http://0.0.0.0:9191/vpp/dump/v1/interfaces/memif +curl http://0.0.0.0:9191/vpp/dump/v1/interfaces/tap +curl http://0.0.0.0:9191/vpp/dump/v1/interfaces/vxlan +curl http://0.0.0.0:9191/vpp/dump/v1/interfaces/afpacket ``` **BFD** @@ -53,9 +46,9 @@ REST plugin allows dumping bidirectional forwarding detection sessions, authentication keys, or the whole configuration. ``` -curl http://0.0.0.0:9191/vpp/v1/bfd -curl http://0.0.0.0:9191/vpp/v1/bfd/sessions -curl http://0.0.0.0:9191/vpp/v1/bfd/authkeys +curl http://0.0.0.0:9191/vpp/dump/v1/bfd +curl http://0.0.0.0:9191/vpp/dump/v1/bfd/sessions +curl http://0.0.0.0:9191/vpp/dump/v1/bfd/authkeys ``` **L2 plugin** @@ -64,10 +57,10 @@ Support for bridge domains, FIBs and cross connects. It is also possible to get the bridge domain IDs.
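Since every dump URL now lives under the `/vpp/dump/v1` prefix, clients only need the new paths. A minimal Go client for one of the endpoints, assuming an agent with the REST plugin listening on the default `0.0.0.0:9191`:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Fetch the bridge domain IDs via the renamed dump URL.
	resp, err := http.Get("http://0.0.0.0:9191/vpp/dump/v1/bdid")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %s\n", resp.Status, body) // e.g. "200 OK: [4,5]"
}
```

The updated L2 dump URLs follow.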
``` -curl http://0.0.0.0:9191/vpp/v1/bdid -curl http://0.0.0.0:9191/vpp/v1/bd -curl http://0.0.0.0:9191/vpp/v1/fibs -curl http://0.0.0.0:9191/vpp/v1/xc +curl http://0.0.0.0:9191/vpp/dump/v1/bdid +curl http://0.0.0.0:9191/vpp/dump/v1/bd +curl http://0.0.0.0:9191/vpp/dump/v1/fibs +curl http://0.0.0.0:9191/vpp/dump/v1/xc ``` **L3 plugin** diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 885b5804cb..2bd4c916f1 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -46,10 +46,6 @@ func (plugin *Plugin) registerAccessListHandlers() { plugin.registerHTTPHandler(resturl.AclMACIP, GET, func() (interface{}, error) { return plugin.aclHandler.DumpMACIPACL(nil) }) - // GET IP ACL example - plugin.HTTPHandlers.RegisterHTTPHandler(resturl.AclIPExample, plugin.exampleIpACLGetHandler, GET) - // GET MACIP ACL example - plugin.HTTPHandlers.RegisterHTTPHandler(resturl.AclMACIPExample, plugin.exampleMacIpACLGetHandler, GET) } // Registers interface REST handlers @@ -252,76 +248,6 @@ func (plugin *Plugin) interfaceACLGetHandler(formatter *render.Render) http.Hand } } -// exampleACLGetHandler - used to get an example ACL configuration -func (plugin *Plugin) exampleIpACLGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting example acl") - - ipRule := &acl.AccessLists_Acl_Rule_Match_IpRule{ - Ip: &acl.AccessLists_Acl_Rule_Match_IpRule_Ip{ - DestinationNetwork: "1.2.3.4/24", - SourceNetwork: "5.6.7.8/24", - }, - Tcp: &acl.AccessLists_Acl_Rule_Match_IpRule_Tcp{ - DestinationPortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 80, - UpperPort: 8080, - }, - SourcePortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 10, - UpperPort: 1010, - }, - TcpFlagsMask: 0xFF, - TcpFlagsValue: 9, - }, - } - - rule := &acl.AccessLists_Acl_Rule{ - Match: &acl.AccessLists_Acl_Rule_Match{ - IpRule: ipRule, - }, - AclAction: acl.AclAction_PERMIT, - } - - aclRes := acl.AccessLists_Acl{ - AclName: "example", - Rules: []*acl.AccessLists_Acl_Rule{rule}, - } - - plugin.Log.Debug(aclRes) - formatter.JSON(w, http.StatusOK, aclRes) - } -} - -func (plugin *Plugin) exampleMacIpACLGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - plugin.Deps.Log.Info("Getting example macip acl") - - macipRule := &acl.AccessLists_Acl_Rule_Match_MacIpRule{ - SourceAddress: "192.168.0.1", - SourceAddressPrefix: uint32(16), - SourceMacAddress: "02:00:DE:AD:00:02", - SourceMacAddressMask: "ff:ff:ff:ff:00:00", - } - - rule := &acl.AccessLists_Acl_Rule{ - Match: &acl.AccessLists_Acl_Rule_Match{ - MacipRule: macipRule, - }, - AclAction: acl.AclAction_PERMIT, - } - - aclRes := acl.AccessLists_Acl{ - AclName: "example", - Rules: []*acl.AccessLists_Acl_Rule{rule}, - } - - plugin.Deps.Log.Debug(aclRes) - formatter.JSON(w, http.StatusOK, aclRes) - } -} - // ipACLPostHandler - used to get acl configuration for a particular interface func (plugin *Plugin) ipACLPostHandler(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { diff --git a/plugins/rest/resturl/rest_url.go b/plugins/rest/resturl/rest_url.go index bc1c523672..72c7086602 100644 --- a/plugins/rest/resturl/rest_url.go +++ b/plugins/rest/resturl/rest_url.go @@ -17,51 +17,47 @@ package resturl // Access list REST urls const ( // REST Acl IP prefix - AclIP = "/vpp/v1/acl/ip" - // REST Acl IP example prefix - AclIPExample = 
"/vpp/v1/acl/ip/example" + AclIP = "/vpp/dump/v1/acl/ip" // REST Acl MACIP prefix - AclMACIP = "/vpp/v1/acl/macip" - // REST Acl MACIP example prefix - AclMACIPExample = "/vpp/v1/acl/macip/example" + AclMACIP = "/vpp/dump/v1/acl/macip" ) // BFD REST urls const ( // BfdUrl is a REST path of a bfd - BfdUrl = "/vpp/v1/bfd" + BfdUrl = "/vpp/dump/v1/bfd" // BfdSession is a REST path of a bfd sessions - BfdSession = "/vpp/v1/bfd/sessions" + BfdSession = "/vpp/dump/v1/bfd/sessions" // BfdAuthKey is a REST path of a bfd authentication keys - BfdAuthKey = "/vpp/v1/bfd/authkeys" + BfdAuthKey = "/vpp/dump/v1/bfd/authkeys" ) // Interface REST urls const ( // restInterface is rest interface path - Interface = "/vpp/v1/interfaces" + Interface = "/vpp/dump/v1/interfaces" // restLoopback is path for loopback interface - Loopback = "/vpp/v1/interfaces/loopback" + Loopback = "/vpp/dump/v1/interfaces/loopback" // restLoopback is path for physical interface - Ethernet = "/vpp/v1/interfaces/ethernet" + Ethernet = "/vpp/dump/v1/interfaces/ethernet" // restLoopback is path for memif interface - Memif = "/vpp/v1/interfaces/memif" + Memif = "/vpp/dump/v1/interfaces/memif" // restLoopback is path for tap interface - Tap = "/vpp/v1/interfaces/tap" + Tap = "/vpp/dump/v1/interfaces/tap" // restAfPacket is path for af-packet interface - AfPacket = "/vpp/v1/interfaces/afpacket" + AfPacket = "/vpp/dump/v1/interfaces/afpacket" // restLoopback is path for vxlan interface - VxLan = "/vpp/v1/interfaces/vxlan" + VxLan = "/vpp/dump/v1/interfaces/vxlan" ) // L2 plugin const ( // restBd is rest bridge domain path - Bd = "/vpp/v1/bd" + Bd = "/vpp/dump/v1/bd" // restBdId is rest bridge domain ID path - BdId = "/vpp/v1/bdid" + BdId = "/vpp/dump/v1/bdid" // restFib is rest FIB path - Fib = "/vpp/v1/fib" + Fib = "/vpp/dump/v1/fib" // restXc is rest cross-connect path - Xc = "/vpp/v1/xc" + Xc = "/vpp/dump/v1/xc" ) From 43de31d5f9d4d42016bc2a1c3515b20496873149 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 30 Jul 2018 10:10:25 +0200 Subject: [PATCH 060/174] fix vpp route dump Signed-off-by: Vladimir Lavor --- flavors/vpp/vpp_flavor.go | 1 + plugins/rest/plugin_impl_rest.go | 10 + plugins/rest/rest_handlers.go | 7 +- plugins/vpp/l3plugin/data_resync.go | 32 ++-- plugins/vpp/l3plugin/route_config.go | 2 +- plugins/vpp/l3plugin/vppcalls/api_vppcalls.go | 7 +- .../vpp/l3plugin/vppcalls/dump_vppcalls.go | 176 ++++++++++++------ .../l3plugin/vppcalls/dump_vppcalls_test.go | 16 +- .../l3plugin/vppcalls/route_vppcalls_test.go | 5 +- 9 files changed, 180 insertions(+), 76 deletions(-) diff --git a/flavors/vpp/vpp_flavor.go b/flavors/vpp/vpp_flavor.go index b43404ed2c..6a93d2fa12 100644 --- a/flavors/vpp/vpp_flavor.go +++ b/flavors/vpp/vpp_flavor.go @@ -141,6 +141,7 @@ func (f *Flavor) Inject() bool { f.RESTAPIPlugin.Deps.PluginInfraDeps = *f.FlavorLocal.InfraDeps("rest") f.RESTAPIPlugin.Deps.HTTPHandlers = &f.FlavorRPC.HTTP f.RESTAPIPlugin.Deps.GoVppmux = &f.GoVPP + f.RESTAPIPlugin.Deps.VPP = &f.VPP f.TelemetryPlugin.Deps.PluginInfraDeps = *f.FlavorLocal.InfraDeps("telemetry") f.TelemetryPlugin.Deps.Prometheus = &f.FlavorRPC.Prometheus diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 52fd42df19..f4642870d0 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -20,6 +20,8 @@ import ( "github.com/ligato/cn-infra/flavors/local" "github.com/ligato/cn-infra/rpc/rest" "github.com/ligato/vpp-agent/plugins/govppmux" + "github.com/ligato/vpp-agent/plugins/vpp" + 
"github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" ) const ( @@ -31,6 +33,8 @@ type Plugin struct { Deps indexItems []indexItem + + ifIndexes ifaceidx.SwIfIndex } // Deps represents dependencies of Rest Plugin @@ -38,6 +42,7 @@ type Deps struct { local.PluginInfraDeps HTTPHandlers rest.HTTPHandlers GoVppmux govppmux.API + VPP vpp.API } type indexItem struct { @@ -57,6 +62,11 @@ func (plugin *Plugin) Init() (err error) { {Name: "ACL IP", Path: "/acl/ip"}, {Name: "Telemetry", Path: "/telemetry"}, } + + if plugin.VPP != nil { + plugin.ifIndexes = plugin.VPP.GetSwIfIndexes() + } + return nil } diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index cdd741d9d2..21484fe393 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -248,7 +248,12 @@ func (plugin *Plugin) staticRoutesGetHandler(formatter *render.Render) http.Hand } defer ch.Close() - l3Handler, err := l3plugin.NewRouteVppHandler(ch, plugin.Log, nil) + if plugin.ifIndexes == nil { + plugin.Log.Error("Error creating VPP handler: missing interface indexes") + formatter.JSON(w, http.StatusInternalServerError, err) + return + } + l3Handler, err := l3plugin.NewRouteVppHandler(ch, plugin.ifIndexes, plugin.Log, nil) if err != nil { plugin.Log.Errorf("Error creating VPP handler: %v", err) formatter.JSON(w, http.StatusInternalServerError, err) diff --git a/plugins/vpp/l3plugin/data_resync.go b/plugins/vpp/l3plugin/data_resync.go index c01426aa9a..7e48b20266 100644 --- a/plugins/vpp/l3plugin/data_resync.go +++ b/plugins/vpp/l3plugin/data_resync.go @@ -35,11 +35,11 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error plugin.clearMapping() // Retrieve VPP route configuration - vppRoutes, err := plugin.rtHandler.DumpStaticRoutes() + vppRouteDetails, err := plugin.rtHandler.DumpStaticRoutes() if err != nil { return err } - plugin.log.Debugf("Found %d routes configured on the VPP", len(vppRoutes)) + plugin.log.Debugf("Found %d routes configured on the VPP", len(vppRouteDetails)) // Correlate NB and VPP configuration for _, nbRoute := range nbRoutes { @@ -61,27 +61,31 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error nbRoute.Weight = 1 } // Look for the same route in the configuration - for _, vppRoute := range vppRoutes { - vppRouteID := routeIdentifier(vppRoute.VrfID, vppRoute.DstAddr.String(), vppRoute.NextHopAddr.String()) + for _, vppRouteDetail := range vppRouteDetails { + if vppRouteDetail.Route == nil { + continue + } + vppRoute := vppRouteDetail.Route + vppRouteID := routeIdentifier(vppRoute.VrfId, vppRoute.DstIpAddr, vppRoute.NextHopAddr) plugin.log.Debugf("RESYNC routes: comparing %s and %s", nbRouteID, vppRouteID) if int32(vppRoute.Type) != int32(nbRoute.Type) { plugin.log.Debugf("RESYNC routes: route type is different (NB: %d, VPP %d)", nbRoute.Type, vppRoute.Type) continue } - if vppRoute.OutIface != nbIfIdx { + if vppRoute.OutgoingInterface != nbRoute.OutgoingInterface { plugin.log.Debugf("RESYNC routes: interface index is different (NB: %d, VPP %d)", - nbIfIdx, vppRoute.OutIface) + nbIfIdx, vppRoute.OutgoingInterface) continue } - if vppRoute.DstAddr.String() != nbRoute.DstIpAddr { + if vppRoute.DstIpAddr != nbRoute.DstIpAddr { plugin.log.Debugf("RESYNC routes: dst address is different (NB: %s, VPP %s)", - nbRoute.DstIpAddr, vppRoute.DstAddr.String()) + nbRoute.DstIpAddr, vppRoute.DstIpAddr) continue } - if vppRoute.VrfID != nbRoute.VrfId { + if vppRoute.VrfId != nbRoute.VrfId { plugin.log.Debugf("RESYNC routes: 
VRF ID is different (NB: %d, VPP %d)", - nbRoute.VrfId, vppRoute.VrfID) + nbRoute.VrfId, vppRoute.VrfId) continue } if vppRoute.Weight != nbRoute.Weight { @@ -94,13 +98,13 @@ func (plugin *RouteConfigurator) Resync(nbRoutes []*l3.StaticRoutes_Route) error nbRoute.Preference, vppRoute.Preference) continue } - if vppRoute.NextHopAddr.String() != nbRoute.NextHopAddr { - if nbRoute.NextHopAddr == "" && vppRoute.NextHopAddr.IsUnspecified() { + if vppRoute.NextHopAddr != nbRoute.NextHopAddr { + if nbRoute.NextHopAddr == "" { plugin.log.Debugf("RESYNC routes: empty next hop address matched (NB: %s, VPP %s)", - nbRoute.NextHopAddr, vppRoute.NextHopAddr.String()) + nbRoute.NextHopAddr, vppRoute.NextHopAddr) } else { plugin.log.Debugf("RESYNC routes: next hop address is different (NB: %s, VPP %s)", - nbRoute.NextHopAddr, vppRoute.NextHopAddr.String()) + nbRoute.NextHopAddr, vppRoute.NextHopAddr) continue } } diff --git a/plugins/vpp/l3plugin/route_config.go b/plugins/vpp/l3plugin/route_config.go index a4bb3c7ef3..bb1758dbd1 100644 --- a/plugins/vpp/l3plugin/route_config.go +++ b/plugins/vpp/l3plugin/route_config.go @@ -85,7 +85,7 @@ func (plugin *RouteConfigurator) Init(logger logging.PluginLogger, goVppMux govp if plugin.ifHandler, err = ifvppcalls.NewIfVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { return err } - if plugin.rtHandler, err = vppcalls.NewRouteVppHandler(plugin.vppChan, plugin.log, plugin.stopwatch); err != nil { + if plugin.rtHandler, err = vppcalls.NewRouteVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.log, plugin.stopwatch); err != nil { return err } diff --git a/plugins/vpp/l3plugin/vppcalls/api_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/api_vppcalls.go index da20975814..1cfd9226f6 100644 --- a/plugins/vpp/l3plugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/api_vppcalls.go @@ -18,6 +18,7 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/measure" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" ) @@ -81,7 +82,7 @@ type RouteVppWrite interface { // RouteVppRead provides read methods for routes type RouteVppRead interface { // DumpStaticRoutes dumps l3 routes from VPP and fills them into the provided static route map. 
- DumpStaticRoutes() ([]*Route, error) + DumpStaticRoutes() ([]*RouteDetails, error) } // arpVppHandler is accessor for ARP-related vppcalls methods @@ -102,6 +103,7 @@ type proxyArpVppHandler struct { type routeHandler struct { stopwatch *measure.Stopwatch callsChannel govppapi.Channel + ifIndexes ifaceidx.SwIfIndex log logging.Logger } @@ -134,10 +136,11 @@ func NewProxyArpVppHandler(callsChan govppapi.Channel, log logging.Logger, stopw } // NewRouteVppHandler creates new instance of route vppcalls handler -func NewRouteVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*routeHandler, error) { +func NewRouteVppHandler(callsChan govppapi.Channel, ifIndexes ifaceidx.SwIfIndex, log logging.Logger, stopwatch *measure.Stopwatch) (*routeHandler, error) { handler := &routeHandler{ callsChannel: callsChan, stopwatch: stopwatch, + ifIndexes: ifIndexes, log: log, } if err := handler.callsChannel.CheckMessageCompatibility(RouteMessages...); err != nil { diff --git a/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go index f8387b1212..1c3754844f 100644 --- a/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go +++ b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls.go @@ -21,17 +21,42 @@ import ( "time" - "github.com/ligato/cn-infra/utils/addrs" l3binapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" + "github.com/ligato/vpp-agent/plugins/vpp/model/l3" ) -func (handler *routeHandler) DumpStaticRoutes() ([]*Route, error) { +// RouteDetails is an object returned as a VPP dump. It contains static route data in proto format, and VPP-specific +// metadata +type RouteDetails struct { + Route *l3.StaticRoutes_Route + Meta *RouteMeta +} + +// RouteMeta holds fields returned from the VPP as details which are not in the model +type RouteMeta struct { + TableName string + OutgoingIfIdx uint32 + Afi uint8 + IsLocal bool + IsUDPEncap bool + IsUnreach bool + IsProhibit bool + IsResolveHost bool + IsResolveAttached bool + IsDvr bool + IsSourceLookup bool + NextHopID uint32 + RpfID uint32 + LabelStack []l3binapi.FibMplsLabel +} + +func (handler *routeHandler) DumpStaticRoutes() ([]*RouteDetails, error) { // IPFibDump time measurement defer func(t time.Time) { handler.stopwatch.TimeLog(l3binapi.IPFibDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) - var routes []*Route + var routes []*RouteDetails // Dump IPv4 l3 FIB. reqCtx := handler.callsChannel.SendMultiRequest(&l3binapi.IPFibDump{}) @@ -39,20 +64,16 @@ func (handler *routeHandler) DumpStaticRoutes() ([]*Route, error) { fibDetails := &l3binapi.IPFibDetails{} stop, err := reqCtx.ReceiveReply(fibDetails) if stop { - break // Break from the loop. + break } if err != nil { return nil, err } - if len(fibDetails.Path) > 0 && fibDetails.Path[0].IsDrop == 1 { - // skip drop routes, not supported by vpp-agent - continue - } ipv4Route, err := handler.dumpStaticRouteIPv4Details(fibDetails) if err != nil { return nil, err } - routes = append(routes, ipv4Route) + routes = append(routes, ipv4Route...) } // Dump IPv6 l3 FIB.
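With the reworked dump, one FIB entry can expand into several `RouteDetails`, one per usable path, which is why the per-entry results are flattened into the route list with `append(routes, ipv4Route...)`. A simplified sketch of that expansion, using illustrative stand-ins for `RouteDetails` and the binary-API path type rather than the plugin's own definitions:

```go
package main

import "fmt"

// Simplified stand-ins: one dumped FIB entry may carry several paths,
// and each usable path becomes its own route entry.
type fibPath struct {
	nextHop string
	isDrop  bool
}

type routeDetails struct {
	dstIP   string
	nextHop string
}

// expandPaths mirrors the per-path loop in dumpStaticRouteIPDetails:
// drop paths are skipped, every other path yields one route.
func expandPaths(dstIP string, paths []fibPath) []*routeDetails {
	var routes []*routeDetails
	for _, p := range paths {
		if p.isDrop {
			continue // drop routes are not supported by vpp-agent
		}
		routes = append(routes, &routeDetails{dstIP: dstIP, nextHop: p.nextHop})
	}
	return routes
}

func main() {
	entry := []fibPath{{nextHop: "10.0.0.1"}, {nextHop: "10.0.0.2"}, {isDrop: true}}
	var all []*routeDetails
	all = append(all, expandPaths("192.168.1.0/24", entry)...) // note the flattening "..."
	fmt.Println(len(all))                                      // 2: one route per usable path
}
```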
@@ -61,81 +82,132 @@ func (handler *routeHandler) DumpStaticRoutes() ([]*Route, error) { fibDetails := &l3binapi.IP6FibDetails{} stop, err := reqCtx.ReceiveReply(fibDetails) if stop { - break // break out of the loop + break } if err != nil { return nil, err } - if len(fibDetails.Path) > 0 && fibDetails.Path[0].IsDrop == 1 { - // skip drop routes, not supported by vpp-agent - continue - } ipv6Route, err := handler.dumpStaticRouteIPv6Details(fibDetails) if err != nil { return nil, err } - routes = append(routes, ipv6Route) + routes = append(routes, ipv6Route...) } return routes, nil } -func (handler *routeHandler) dumpStaticRouteIPv4Details(fibDetails *l3binapi.IPFibDetails) (*Route, error) { +func (handler *routeHandler) dumpStaticRouteIPv4Details(fibDetails *l3binapi.IPFibDetails) ([]*RouteDetails, error) { return handler.dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, false) } -func (handler *routeHandler) dumpStaticRouteIPv6Details(fibDetails *l3binapi.IP6FibDetails) (*Route, error) { +func (handler *routeHandler) dumpStaticRouteIPv6Details(fibDetails *l3binapi.IP6FibDetails) ([]*RouteDetails, error) { return handler.dumpStaticRouteIPDetails(fibDetails.TableID, fibDetails.TableName, fibDetails.Address, fibDetails.AddressLength, fibDetails.Path, true) } -// dumpStaticRouteIPDetails processes static route details and returns a route object -func (handler *routeHandler) dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, prefixLen uint8, path []l3binapi.FibPath, ipv6 bool) (*Route, error) { - // route details - var ipAddr string +// dumpStaticRouteIPDetails processes static route details and returns route objects. The number of routes returned +// depends on the size of the path list.
+func (handler *routeHandler) dumpStaticRouteIPDetails(tableID uint32, tableName []byte, address []byte, prefixLen uint8, paths []l3binapi.FibPath, ipv6 bool) ([]*RouteDetails, error) { + // Common fields for every route path (destination IP, VRF) + var dstIP string if ipv6 { - ipAddr = fmt.Sprintf("%s/%d", net.IP(address).To16().String(), uint32(prefixLen)) + dstIP = fmt.Sprintf("%s/%d", net.IP(address).To16().String(), uint32(prefixLen)) } else { - ipAddr = fmt.Sprintf("%s/%d", net.IP(address[:4]).To4().String(), uint32(prefixLen)) + dstIP = fmt.Sprintf("%s/%d", net.IP(address[:4]).To4().String(), uint32(prefixLen)) } - rt := &Route{ - Type: IntraVrf, // default - } + var routeDetails []*RouteDetails - // IP net - parsedIP, _, err := addrs.ParseIPWithPrefix(ipAddr) - if err != nil { - return nil, err - } + // Paths + if len(paths) > 0 { + for _, path := range paths { + if uintToBool(path.IsDrop) { + // skip drop routes, not supported by vpp-agent + continue + } + // Next hop IP address + var nextHopIP string + if ipv6 { + nextHopIP = fmt.Sprintf("%s", net.IP(path.NextHop).To16().String()) + } else { + nextHopIP = fmt.Sprintf("%s", net.IP(path.NextHop[:4]).To4().String()) + } - rt.TableName = string(bytes.SplitN(tableName, []byte{0x00}, 2)[0]) - rt.VrfID = tableID - rt.DstAddr = *parsedIP + // Route type (if via VRF is used) + var routeType l3.StaticRoutes_Route_RouteType + var viaVrfID uint32 + if path.SwIfIndex == NextHopOutgoingIfUnset && path.TableID != tableID { + // outgoing interface not specified and path table id not equal to route table id = inter-VRF route + routeType = l3.StaticRoutes_Route_INTER_VRF + viaVrfID = path.TableID + } else { + routeType = l3.StaticRoutes_Route_INTRA_VRF // default + } - if len(path) > 0 { - // TODO: if len(path) > 1, it means multiple NB routes (load-balancing) - not implemented properly + // Outgoing interface + var ifName string + var ifIdx uint32 + if path.SwIfIndex != ^uint32(0) { + var exists bool + ifIdx = path.SwIfIndex + if ifName, _, exists = handler.ifIndexes.LookupName(path.SwIfIndex); !exists { + handler.log.Warnf("Static route dump: interface name for index %d not found", path.SwIfIndex) + } + } - var nextHopAddr net.IP - if ipv6 { - nextHopAddr = net.IP(path[0].NextHop).To16() - } else { - nextHopAddr = net.IP(path[0].NextHop[:4]).To4() - } + // Route configuration + route := &l3.StaticRoutes_Route{ + Type: routeType, + VrfId: tableID, + DstIpAddr: dstIP, + NextHopAddr: nextHopIP, + OutgoingInterface: ifName, + Weight: uint32(path.Weight), + Preference: uint32(path.Preference), + ViaVrfId: viaVrfID, + } - rt.NextHopAddr = nextHopAddr + // Route metadata + meta := &RouteMeta{ + TableName: string(bytes.SplitN(tableName, []byte{0x00}, 2)[0]), + OutgoingIfIdx: ifIdx, + NextHopID: path.NextHopID, + RpfID: path.RpfID, + Afi: path.Afi, + IsLocal: uintToBool(path.IsLocal), + IsUDPEncap: uintToBool(path.IsUDPEncap), + IsDvr: uintToBool(path.IsDvr), + IsProhibit: uintToBool(path.IsProhibit), + IsResolveAttached: uintToBool(path.IsResolveAttached), + IsResolveHost: uintToBool(path.IsResolveHost), + IsSourceLookup: uintToBool(path.IsSourceLookup), + IsUnreach: uintToBool(path.IsUnreach), + LabelStack: path.LabelStack, + } - if path[0].SwIfIndex == NextHopOutgoingIfUnset && path[0].TableID != tableID { - // outgoing interface not specified and path table id not equal to route table id = inter-VRF route - rt.Type = InterVrf - rt.ViaVrfId = path[0].TableID + routeDetails = append(routeDetails, &RouteDetails{ + Route: route, + Meta: meta, + }) } - - 
rt.OutIface = path[0].SwIfIndex - rt.Preference = uint32(path[0].Preference) - rt.Weight = uint32(path[0].Weight) + } else { + // Return route without path fields, but this is not a valid configuration + handler.log.Warnf("Route with destination IP %s (VRF %d) has no path specified", dstIP, tableID) + route := &l3.StaticRoutes_Route{ + Type: l3.StaticRoutes_Route_INTRA_VRF, // default + VrfId: tableID, + DstIpAddr: dstIP, + } + meta := &RouteMeta{ + TableName: string(bytes.SplitN(tableName, []byte{0x00}, 2)[0]), + } + routeDetails = append(routeDetails, &RouteDetails{ + Route: route, + Meta: meta, + }) } - return rt, nil + return routeDetails, nil } func (handler *arpVppHandler) DumpArpEntries() ([]*ArpEntry, error) { diff --git a/plugins/vpp/l3plugin/vppcalls/dump_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls_test.go index c8df8a69e3..15cb971041 100644 --- a/plugins/vpp/l3plugin/vppcalls/dump_vppcalls_test.go +++ b/plugins/vpp/l3plugin/vppcalls/dump_vppcalls_test.go @@ -18,8 +18,10 @@ import ( "testing" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" ) @@ -27,10 +29,14 @@ import ( // Test dumping routes func TestDumpStaticRoutes(t *testing.T) { ctx := vppcallmock.SetupTestCtx(t) - l3handler, err := NewRouteVppHandler(ctx.MockChannel, logrus.DefaultLogger(), nil) + ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "rt-dump-if-idx", nil)) + l3handler, err := NewRouteVppHandler(ctx.MockChannel, ifIndexes, logrus.DefaultLogger(), nil) Expect(err).To(BeNil()) defer ctx.TeardownTestCtx() + ifIndexes.RegisterName("if1", 2, nil) + ifIndexes.RegisterName("if2", 3, nil) + ctx.MockVpp.MockReply(&ip.IPFibDetails{ Path: []ip.FibPath{{SwIfIndex: 3}}, }) @@ -40,9 +46,9 @@ func TestDumpStaticRoutes(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - routes, err := l3handler.DumpStaticRoutes() + rtDetails, err := l3handler.DumpStaticRoutes() Expect(err).To(Succeed()) - Expect(routes).To(HaveLen(2)) - Expect(routes[0].OutIface).To(Equal(uint32(3))) - Expect(routes[1].OutIface).To(Equal(uint32(2))) + Expect(rtDetails).To(HaveLen(2)) + Expect(rtDetails[0].Route.OutgoingInterface).To(Equal("if2")) + Expect(rtDetails[1].Route.OutgoingInterface).To(Equal("if1")) } diff --git a/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go b/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go index a3fbda30ab..d40519f08e 100644 --- a/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go +++ b/plugins/vpp/l3plugin/vppcalls/route_vppcalls_test.go @@ -19,8 +19,10 @@ import ( "testing" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/binapi/ip" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" @@ -80,7 +82,8 @@ func routeTestSetup(t *testing.T) (*vppcallmock.TestCtx, ifvppcalls.IfVppAPI, vp log := logrus.NewLogger("test-log") ifHandler, err := ifvppcalls.NewIfVppHandler(ctx.MockChannel, log, nil) Expect(err).To(BeNil()) - rtHandler, err := 
vppcalls.NewRouteVppHandler(ctx.MockChannel, log, nil) + ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(log, "rt-if-idx", nil)) + rtHandler, err := vppcalls.NewRouteVppHandler(ctx.MockChannel, ifIndexes, log, nil) Expect(err).To(BeNil()) return ctx, ifHandler, rtHandler } From c3e469377f65fdfaf1dfc1d9fe32c14618ad5ed2 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 30 Jul 2018 10:26:30 +0200 Subject: [PATCH 061/174] address comments Signed-off-by: Vladimir Lavor --- plugins/rest/rest_handlers.go | 6 +++--- plugins/vpp/model/bfd/keys_agent.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 2bd4c916f1..7c21d0d785 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -125,11 +125,11 @@ func (plugin *Plugin) registerHTTPHandler(key, method string, f func() (interfac res, err := f() if err != nil { plugin.Deps.Log.Errorf("Error: %v", err) - w.Write([]byte("500 Internal server error: " + err.Error())) - formatter.JSON(w, http.StatusInternalServerError, err) + errStr := fmt.Sprintf("500 Internal server error: %s\n", err.Error()) + formatter.Text(w, http.StatusInternalServerError, errStr) return } - plugin.Deps.Log.Debug("Rest uri: %s, data: %v", key, res) + plugin.Deps.Log.Debugf("Rest uri: %s, data: %v", key, res) formatter.JSON(w, http.StatusOK, res) } } diff --git a/plugins/vpp/model/bfd/keys_agent.go b/plugins/vpp/model/bfd/keys_agent.go index 00663f46f4..fc5d72916b 100644 --- a/plugins/vpp/model/bfd/keys_agent.go +++ b/plugins/vpp/model/bfd/keys_agent.go @@ -20,7 +20,7 @@ const ( // bfdAuthKeysPrefix bfd-key/ AuthKeysPrefix = "vpp/config/v1/bfd/auth-key/" // BfdEchoFunctionPrefix bfd-echo-function/ - EchoFunctionPrefix = "vpp/config/v1/bfd/echo-function" + EchoFunctionPrefix = "vpp/config/v1/bfd/echo-function/" ) // SessionKey returns the prefix used in ETCD to store vpp bfd config From ab84b9f96f62ce3f32d9b8cb7b2dd74529107723 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 30 Jul 2018 11:03:45 +0200 Subject: [PATCH 062/174] address comments Signed-off-by: Vladimir Lavor --- plugins/vpp/ifplugin/bfd_config.go | 4 ++-- plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go | 2 +- plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go | 8 ++++---- plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go | 2 +- plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/plugins/vpp/ifplugin/bfd_config.go b/plugins/vpp/ifplugin/bfd_config.go index 812f067d7d..53e1b40d9e 100644 --- a/plugins/vpp/ifplugin/bfd_config.go +++ b/plugins/vpp/ifplugin/bfd_config.go @@ -344,7 +344,7 @@ func (plugin *BFDConfigurator) DeleteBfdAuthKey(bfdInput *bfd.SingleHopBFD_Key) if err != nil { return fmt.Errorf("error while removing BFD auth key with ID %v", bfdInput.Id) } - authKeyIDAsString := strconv.FormatUint(uint64(bfdInput.Id), 10) + authKeyIDAsString := AuthKeyIdentifier(bfdInput.Id) plugin.keysIndexes.UnregisterName(authKeyIDAsString) plugin.log.Debugf("BFD authentication key with id %v unregistered", bfdInput.Id) // Recreate BFD sessions if necessary @@ -414,5 +414,5 @@ func (plugin *BFDConfigurator) DeleteBfdEchoFunction(bfdInput *bfd.SingleHopBFD_ // Generates common identifier for authentication key func AuthKeyIdentifier(id uint32) string { - return strconv.FormatUint(uint64(id), 10) + return strconv.Itoa(int(id)) } diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go 
index 0e42aee901..3839da424c 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls.go @@ -63,7 +63,7 @@ func (handler *bfdVppHandler) AddBfdUDPSession(bfdSess *bfd.SingleHopBFD_Session // Authentication if bfdSess.Authentication != nil { - keyID := strconv.FormatUint(uint64(bfdSess.Authentication.KeyId), 10) + keyID := strconv.Itoa(int(bfdSess.Authentication.KeyId)) handler.log.Infof("Setting up authentication with index %v", keyID) _, _, found := bfdKeyIndexes.LookupIdx(keyID) if found { diff --git a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go index af732bf12a..9474761dae 100644 --- a/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/bfd_vppcalls_test.go @@ -21,12 +21,12 @@ import ( "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/vpp-agent/idxvpp/nametoidx" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" - "strconv" ) func TestAddBfdUDPSession(t *testing.T) { @@ -34,7 +34,7 @@ func TestAddBfdUDPSession(t *testing.T) { defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) - bfdKeyIndexes.RegisterName(strconv.FormatUint(uint64(1), 10), 1, nil) + bfdKeyIndexes.RegisterName(ifplugin.AuthKeyIdentifier(1), 1, nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) @@ -66,7 +66,7 @@ func TestAddBfdUDPSessionIPv6(t *testing.T) { defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) - bfdKeyIndexes.RegisterName(strconv.FormatUint(uint64(1), 10), 1, nil) + bfdKeyIndexes.RegisterName(ifplugin.AuthKeyIdentifier(1), 1, nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) @@ -153,7 +153,7 @@ func TestAddBfdUDPSessionIncorrectSrcIPError(t *testing.T) { defer ctx.TeardownTestCtx() bfdKeyIndexes := nametoidx.NewNameToIdx(logrus.DefaultLogger(), "bfd", nil) - bfdKeyIndexes.RegisterName(strconv.FormatUint(uint64(1), 10), 1, nil) + bfdKeyIndexes.RegisterName(ifplugin.AuthKeyIdentifier(1), 1, nil) ctx.MockVpp.MockReply(&bfd_api.BfdUDPAddReply{}) diff --git a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go index 35977c4bd7..72b0d494a0 100644 --- a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go +++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls.go @@ -106,7 +106,7 @@ func (handler *bridgeDomainVppHandler) DumpBridgeDomainIDs() ([]uint32, error) { }(time.Now()) req := &l2ba.BridgeDomainDump{BdID: ^uint32(0)} - activeDomains := make([]uint32, 1) + var activeDomains []uint32 reqCtx := handler.callsChannel.SendMultiRequest(req) for { msg := &l2ba.BridgeDomainDetails{} diff --git a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go index 0b32a22738..b343f1839b 100644 --- a/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go +++ b/plugins/vpp/l2plugin/vppcalls/dump_vppcalls_test.go @@ -116,7 +116,7 @@ func TestDumpBridgeDomainIDs(t *testing.T) { activeDomains, err := bdHandler.DumpBridgeDomainIDs() Expect(err).To(BeNil()) - Expect(activeDomains).To(Equal([]uint32{0, 4, 5})) + Expect(activeDomains).To(Equal([]uint32{4, 5})) 
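The `DumpBridgeDomainIDs` fix above replaces `make([]uint32, 1)` with a plain `var` declaration: the pre-sized slice carried a spurious zero element that every `append` preserved, which is exactly what the updated test expectation encodes. A minimal reproduction of the pitfall:

```go
package main

import "fmt"

func main() {
	// The old code pre-sized the slice, so the zero element stayed in front
	// of every appended bridge domain ID.
	withMake := make([]uint32, 1)
	withMake = append(withMake, 4, 5)
	fmt.Println(withMake) // [0 4 5]

	// A nil slice is safe to append to and starts truly empty,
	// which is what the updated test now expects.
	var ids []uint32
	ids = append(ids, 4, 5)
	fmt.Println(ids) // [4 5]
}
```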
ctx.MockVpp.MockReply(&l2ba.BridgeDomainAddDelReply{}) _, err = bdHandler.DumpBridgeDomainIDs() From 6be1212716286ed8b6aa43f8b68fcc1179b3c703 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 30 Jul 2018 11:21:35 +0200 Subject: [PATCH 063/174] route rest support Signed-off-by: Vladimir Lavor --- plugins/rest/README.md | 2 +- plugins/rest/plugin_impl_rest.go | 10 ++++--- plugins/rest/rest_handlers.go | 46 ++++++-------------------------- plugins/rest/resturl/rest_url.go | 6 +++++ 4 files changed, 22 insertions(+), 42 deletions(-) diff --git a/plugins/rest/README.md b/plugins/rest/README.md index 4d212c8b0b..b5c8859b74 100644 --- a/plugins/rest/README.md +++ b/plugins/rest/README.md @@ -68,7 +68,7 @@ curl http://0.0.0.0:9191/vpp/dump/v1/xc ARPs and static routes exposed via REST: ``` -curl http://0.0.0.0:9191/staticroutes +curl http://0.0.0.0:9191/vpp/dump/v1/routes curl http://0.0.0.0:9191/arps ``` diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 8c7db23945..7ffa98cf0c 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -29,6 +29,7 @@ import ( aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" + l3vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" ) const ( @@ -57,6 +58,7 @@ type Plugin struct { bdHandler l2vppcalls.BridgeDomainVppRead fibHandler l2vppcalls.FibVppRead xcHandler l2vppcalls.XConnectVppRead + rtHandler l3vppcalls.RouteVppRead sync.Mutex } @@ -111,6 +113,9 @@ func (plugin *Plugin) Init() (err error) { if plugin.xcHandler, err = l2vppcalls.NewXConnectVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != nil { return err } + if plugin.rtHandler, err = l3vppcalls.NewRouteVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != nil { + return err + } plugin.indexItems = []indexItem{ {Name: "ACL IP", Path: resturl.AclIP}, @@ -126,10 +131,9 @@ func (plugin *Plugin) Init() (err error) { {Name: "Bridge domain IDs", Path: resturl.BdId}, {Name: "L2Fibs", Path: resturl.Fib}, {Name: "XConnectorPairs", Path: resturl.Xc}, + {Name: "Static routes", Path: resturl.Routes}, {Name: "ARPs", Path: "/arps"}, - {Name: "Static routes", Path: "/staticroutes"}, - {Name: "Telemetry", Path: "/telemetry"}, } return nil @@ -143,9 +147,9 @@ func (plugin *Plugin) AfterInit() (err error) { plugin.registerInterfaceHandlers() plugin.registerBfdHandlers() plugin.registerL2Handlers() + plugin.registerL3Handlers() plugin.HTTPHandlers.RegisterHTTPHandler("/arps", plugin.arpGetHandler, "GET") - plugin.HTTPHandlers.RegisterHTTPHandler("/staticroutes", plugin.staticRoutesGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler(fmt.Sprintf("/acl/interface/{%s:[0-9]+}", swIndexVarName), plugin.interfaceACLGetHandler, "GET") plugin.HTTPHandlers.RegisterHTTPHandler("/command", plugin.commandHandler, "POST") diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 506dde60ca..5a40653a3f 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -115,6 +115,14 @@ func (plugin *Plugin) registerL2Handlers() { }) } +// Registers L3 plugin REST handlers +func (plugin *Plugin) registerL3Handlers() { + // GET static routes + plugin.registerHTTPHandler(resturl.Routes, GET, func() (interface{}, error) { + return plugin.rtHandler.DumpStaticRoutes() + }) +} + // registerHTTPHandler is common register method for 
all handlers func (plugin *Plugin) registerHTTPHandler(key, method string, f func() (interface{}, error)) { handlerFunc := func(formatter *render.Render) http.HandlerFunc { @@ -169,44 +177,6 @@ func (plugin *Plugin) arpGetHandler(formatter *render.Render) http.HandlerFunc { } } -// staticRoutesGetHandler - used to get list of all static routes -func (plugin *Plugin) staticRoutesGetHandler(formatter *render.Render) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - plugin.Log.Debug("Getting list of all static routes") - - // create an API channel - ch, err := plugin.GoVppmux.NewAPIChannel() - if err != nil { - plugin.Log.Errorf("Error creating channel: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - defer ch.Close() - - if plugin.ifIndexes == nil { - plugin.Log.Error("Error creating VPP handler: missing interface indexes") - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - l3Handler, err := l3plugin.NewRouteVppHandler(ch, plugin.ifIndexes, plugin.Log, nil) - if err != nil { - plugin.Log.Errorf("Error creating VPP handler: %v", err) - formatter.JSON(w, http.StatusInternalServerError, err) - return - } - res, err := l3Handler.DumpStaticRoutes() - if err != nil { - plugin.Log.Errorf("Error: %v", err) - formatter.JSON(w, http.StatusInternalServerError, nil) - return - } - - plugin.Log.Debug(res) - formatter.JSON(w, http.StatusOK, res) - } -} - // interfaceACLGetHandler - used to get acl configuration for a particular interface func (plugin *Plugin) interfaceACLGetHandler(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { diff --git a/plugins/rest/resturl/rest_url.go b/plugins/rest/resturl/rest_url.go index 72c7086602..96728185af 100644 --- a/plugins/rest/resturl/rest_url.go +++ b/plugins/rest/resturl/rest_url.go @@ -61,3 +61,9 @@ const ( // restXc is rest cross-connect path Xc = "/vpp/dump/v1/xc" ) + +// L3 plugin +const ( + // Routes is rest static route path + Routes = "/vpp/dump/v1/routes" +) From b2b0df09320e50e042410768183b5feaf8d6d55c Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 30 Jul 2018 12:52:53 +0200 Subject: [PATCH 064/174] rest support for NAT Signed-off-by: Vladimir Lavor --- plugins/vpp/ifplugin/nat_config.go | 7 +- .../ifplugin/vppcalls/dump_nat_vppcalls.go | 51 +++++++++---- .../vppcalls/dump_nat_vppcalls_test.go | 22 +++--- .../ifplugin/vppcalls/nat_vppcalls_test.go | 72 +++++++++---------- 4 files changed, 86 insertions(+), 66 deletions(-) diff --git a/plugins/vpp/ifplugin/nat_config.go b/plugins/vpp/ifplugin/nat_config.go index d6a69d2090..79d3c835a7 100644 --- a/plugins/vpp/ifplugin/nat_config.go +++ b/plugins/vpp/ifplugin/nat_config.go @@ -116,7 +116,8 @@ func (plugin *NatConfigurator) Init(logger logging.PluginLogger, goVppMux govppm } // VPP API handler - if plugin.natHandler, err = vppcalls.NewNatVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.log, plugin.stopwatch); err != nil { + if plugin.natHandler, err = vppcalls.NewNatVppHandler(plugin.vppChan, plugin.vppDumpChan, plugin.ifIndexes, + plugin.log, plugin.stopwatch); err != nil { return err } @@ -442,12 +443,12 @@ func (plugin *NatConfigurator) ResolveDeletedInterface(ifName string, ifIdx uint // DumpNatGlobal returns the current NAT44 global config func (plugin *NatConfigurator) DumpNatGlobal() (*nat.Nat44Global, error) { - return plugin.natHandler.Nat44GlobalConfigDump(plugin.ifIndexes) + return plugin.natHandler.Nat44GlobalConfigDump() } // DumpNatDNat returns the 
current NAT44 DNAT config func (plugin *NatConfigurator) DumpNatDNat() (*nat.Nat44DNat, error) { - return plugin.natHandler.NAT44DNatDump(plugin.ifIndexes) + return plugin.natHandler.NAT44DNatDump() } // enables set of interfaces as inside/outside in NAT diff --git a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls.go index 9c8a205454..39ba96f58e 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls.go @@ -22,22 +22,43 @@ import ( "time" bin_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/nat" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/model/nat" ) -func (handler *natVppHandler) Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44Global, error) { +// Nat44Details contains all configuration available for network address translation. +// Note: SNAT is currently skipped, since there is no model defined for it +type Nat44Details struct { + Global *nat.Nat44Global + DNat *nat.Nat44DNat +} + +func (handler *natVppHandler) Nat44Dump() (*Nat44Details, error) { + global, err := handler.Nat44GlobalConfigDump() + if err != nil { + return nil, err + } + dNat, err := handler.NAT44DNatDump() + if err != nil { + return nil, err + } + return &Nat44Details{ + Global: global, + DNat: dNat, + }, nil +} + +func (handler *natVppHandler) Nat44GlobalConfigDump() (*nat.Nat44Global, error) { handler.log.Debug("dumping Nat44Global") // Dump all necessary data to reconstruct global NAT configuration isEnabled, err := handler.nat44IsForwardingEnabled() if err != nil { return nil, err } - natInterfaces, err := handler.Nat44InterfaceDump(swIfIndices) + natInterfaces, err := handler.Nat44InterfaceDump() if err != nil { return nil, err } - natOutputFeature, err := handler.nat44InterfaceOutputFeatureDump(swIfIndices) + natOutputFeature, err := handler.nat44InterfaceOutputFeatureDump() if err != nil { return nil, err } @@ -73,14 +94,14 @@ func (handler *natVppHandler) Nat44GlobalConfigDump(swIfInd }, nil } -func (handler *natVppHandler) NAT44DNatDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44DNat, error) { +func (handler *natVppHandler) NAT44DNatDump() (*nat.Nat44DNat, error) { // List of DNAT configs var dNatCfgs []*nat.Nat44DNat_DNatConfig handler.log.Debug("dumping DNat") // Static mappings - natStMappings, err := handler.nat44StaticMappingDump(swIfIndices) + natStMappings, err := handler.nat44StaticMappingDump() if err != nil { return nil, fmt.Errorf("failed to dump NAT44 static mappings: %v", err) } @@ -96,7 +117,7 @@ func (handler *natVppHandler) NAT44DNatDump(swIfIndices ifaceidx.SwIfIndex) (*na handler.processDNatData(tag, data, &dNatCfgs) } // Identity mappings - natIdMappings, err := handler.nat44IdentityMappingDump(swIfIndices) + natIdMappings, err := handler.nat44IdentityMappingDump() if err != nil { return nil, fmt.Errorf("failed to dump NAT44 identity mappings: %v", err) } @@ -145,7 +166,7 @@ func (handler *natVppHandler) nat44AddressDump() (addresses []*nat.Nat44Global_A } // nat44StaticMappingDump returns a map of static mapping tag/data pairs -func (handler *natVppHandler) nat44StaticMappingDump(swIfIndices ifaceidx.SwIfIndex) (entries map[string]*nat.Nat44DNat_DNatConfig_StaticMapping, err error) { +func (handler *natVppHandler) nat44StaticMappingDump() (entries map[string]*nat.Nat44DNat_DNatConfig_StaticMapping, err error) { defer func(t time.Time) {
handler.stopwatch.TimeLog(bin_api.Nat44StaticMappingDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) @@ -174,7 +195,7 @@ func (handler *natVppHandler) nat44StaticMappingDump(swIfIndices ifaceidx.SwIfIn entries[tag] = &nat.Nat44DNat_DNatConfig_StaticMapping{ VrfId: msg.VrfID, ExternalInterface: func(ifIdx uint32) string { - ifName, _, found := swIfIndices.LookupName(ifIdx) + ifName, _, found := handler.ifIndexes.LookupName(ifIdx) if !found && ifIdx != 0xffffffff { handler.log.Warnf("Interface with index %v not found in the mapping", ifIdx) } @@ -247,7 +268,7 @@ func (handler *natVppHandler) nat44StaticMappingLbDump() (entries map[string]*na } // nat44IdentityMappingDump returns a map of identity mapping tag/data pairs -func (handler *natVppHandler) nat44IdentityMappingDump(swIfIndices ifaceidx.SwIfIndex) (entries map[string]*nat.Nat44DNat_DNatConfig_IdentityMapping, err error) { +func (handler *natVppHandler) nat44IdentityMappingDump() (entries map[string]*nat.Nat44DNat_DNatConfig_IdentityMapping, err error) { defer func(t time.Time) { handler.stopwatch.TimeLog(bin_api.Nat44IdentityMappingDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) @@ -275,7 +296,7 @@ func (handler *natVppHandler) nat44IdentityMappingDump(swIfIndices ifaceidx.SwIf entries[tag] = &nat.Nat44DNat_DNatConfig_IdentityMapping{ VrfId: msg.VrfID, AddressedInterface: func(ifIdx uint32) string { - ifName, _, found := swIfIndices.LookupName(ifIdx) + ifName, _, found := handler.ifIndexes.LookupName(ifIdx) if !found && ifIdx != 0xffffffff { handler.log.Warnf("Interface with index %v not found in the mapping", ifIdx) } @@ -292,7 +313,7 @@ func (handler *natVppHandler) nat44IdentityMappingDump(swIfIndices ifaceidx.SwIf return entries, nil } -func (handler *natVppHandler) Nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex) (interfaces []*nat.Nat44Global_NatInterface, err error) { +func (handler *natVppHandler) Nat44InterfaceDump() (interfaces []*nat.Nat44Global_NatInterface, err error) { defer func(t time.Time) { handler.stopwatch.TimeLog(bin_api.Nat44InterfaceDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) @@ -311,7 +332,7 @@ func (handler *natVppHandler) Nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex) } // Find interface name - ifName, _, found := swIfIndices.LookupName(msg.SwIfIndex) + ifName, _, found := handler.ifIndexes.LookupName(msg.SwIfIndex) if !found { handler.log.Warnf("Interface with index %d not found in the mapping", msg.SwIfIndex) continue @@ -337,7 +358,7 @@ func (handler *natVppHandler) Nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex) } // nat44InterfaceOutputFeatureDump returns a list of interfaces with output feature set -func (handler *natVppHandler) nat44InterfaceOutputFeatureDump(swIfIndices ifaceidx.SwIfIndex) (ifaces []*nat.Nat44Global_NatInterface, err error) { +func (handler *natVppHandler) nat44InterfaceOutputFeatureDump() (ifaces []*nat.Nat44Global_NatInterface, err error) { defer func(t time.Time) { handler.stopwatch.TimeLog(bin_api.Nat44InterfaceOutputFeatureDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) @@ -356,7 +377,7 @@ func (handler *natVppHandler) nat44InterfaceOutputFeatureDump(swIfIndices ifacei } // Find interface name - ifName, _, found := swIfIndices.LookupName(msg.SwIfIndex) + ifName, _, found := handler.ifIndexes.LookupName(msg.SwIfIndex) if !found { handler.log.Warnf("Interface with index %d not found in the mapping", msg.SwIfIndex) continue diff --git a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go index 
62635b2048..e72b2ff191 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_nat_vppcalls_test.go @@ -29,7 +29,7 @@ import ( ) func TestNat44InterfaceDump(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, swIfIndexes := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bin_api.Nat44InterfaceDetails{ @@ -38,17 +38,16 @@ func TestNat44InterfaceDump(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, nil) - ifaces, err := natHandler.Nat44InterfaceDump(swIfIndexes) + ifaces, err := natHandler.Nat44InterfaceDump() Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(1)) Expect(ifaces[0].IsInside).To(BeFalse()) } func TestNat44InterfaceDump2(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, swIfIndexes := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bin_api.Nat44InterfaceDetails{ @@ -57,17 +56,16 @@ func TestNat44InterfaceDump2(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, nil) - ifaces, err := natHandler.Nat44InterfaceDump(swIfIndexes) + ifaces, err := natHandler.Nat44InterfaceDump() Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(1)) Expect(ifaces[0].IsInside).To(BeTrue()) } func TestNat44InterfaceDump3(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, swIfIndexes := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&bin_api.Nat44InterfaceDetails{ @@ -76,20 +74,20 @@ func TestNat44InterfaceDump3(t *testing.T) { }) ctx.MockVpp.MockReply(&vpe.ControlPingReply{}) - swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) swIfIndexes.RegisterName("if0", 1, nil) - ifaces, err := natHandler.Nat44InterfaceDump(swIfIndexes) + ifaces, err := natHandler.Nat44InterfaceDump() Expect(err).To(Succeed()) Expect(ifaces).To(HaveLen(2)) Expect(ifaces[0].IsInside).To(BeFalse()) Expect(ifaces[1].IsInside).To(BeTrue()) } -func natTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.NatVppAPI) { +func natTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.NatVppAPI, ifaceidx.SwIfIndexRW) { ctx := vppcallmock.SetupTestCtx(t) log := logrus.NewLogger("test-log") - natHandler, err := vppcalls.NewNatVppHandler(ctx.MockChannel, ctx.MockChannel, log, nil) + swIfIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "test-sw_if_indexes", ifaceidx.IndexMetadata)) + natHandler, err := vppcalls.NewNatVppHandler(ctx.MockChannel, ctx.MockChannel, swIfIndexes, log, nil) Expect(err).To(BeNil()) - return ctx, natHandler + return ctx, natHandler, swIfIndexes } diff --git a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go index 6bbb14c88b..1d6e9347ed 100644 --- a/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/nat_vppcalls_test.go @@ -25,7 +25,7 @@ import ( ) func TestSetNat44Forwarding(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44ForwardingEnableDisableReply{}) @@ -40,7 +40,7 @@ func 
TestSetNat44Forwarding(t *testing.T) { } func TestUnsetNat44Forwarding(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44ForwardingEnableDisableReply{}) @@ -55,7 +55,7 @@ func TestUnsetNat44Forwarding(t *testing.T) { } func TestSetNat44ForwardingError(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object @@ -66,7 +66,7 @@ func TestSetNat44ForwardingError(t *testing.T) { } func TestSetNat44ForwardingRetval(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44ForwardingEnableDisableReply{ @@ -78,7 +78,7 @@ func TestSetNat44ForwardingRetval(t *testing.T) { } func TestEnableNat44InterfaceAsInside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) @@ -95,7 +95,7 @@ func TestEnableNat44InterfaceAsInside(t *testing.T) { } func TestEnableNat44InterfaceAsOutside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) @@ -112,7 +112,7 @@ func TestEnableNat44InterfaceAsOutside(t *testing.T) { } func TestEnableNat44InterfaceError(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object @@ -123,7 +123,7 @@ func TestEnableNat44InterfaceError(t *testing.T) { } func TestEnableNat44InterfaceRetval(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{ @@ -135,7 +135,7 @@ func TestEnableNat44InterfaceRetval(t *testing.T) { } func TestDisableNat44InterfaceAsInside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) @@ -152,7 +152,7 @@ func TestDisableNat44InterfaceAsInside(t *testing.T) { } func TestDisableNat44InterfaceAsOutside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelFeatureReply{}) @@ -169,7 +169,7 @@ func TestDisableNat44InterfaceAsOutside(t *testing.T) { } func TestEnableNat44InterfaceOutputAsInside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) @@ -186,7 +186,7 @@ func TestEnableNat44InterfaceOutputAsInside(t *testing.T) { } func TestEnableNat44InterfaceOutputAsOutside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) @@ -203,7 +203,7 @@ func TestEnableNat44InterfaceOutputAsOutside(t *testing.T) { } func TestEnableNat44InterfaceOutputError(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object @@ -214,7 +214,7 @@ func TestEnableNat44InterfaceOutputError(t *testing.T) { } func TestEnableNat44InterfaceOutputRetval(t 
*testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{ @@ -226,7 +226,7 @@ func TestEnableNat44InterfaceOutputRetval(t *testing.T) { } func TestDisableNat44InterfaceOutputAsInside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) @@ -243,7 +243,7 @@ func TestDisableNat44InterfaceOutputAsInside(t *testing.T) { } func TestDisableNat44InterfaceOutputAsOutside(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44InterfaceAddDelOutputFeatureReply{}) @@ -260,7 +260,7 @@ func TestDisableNat44InterfaceOutputAsOutside(t *testing.T) { } func TestAddNat44AddressPool(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() @@ -281,7 +281,7 @@ func TestAddNat44AddressPool(t *testing.T) { } func TestAddNat44AddressPoolError(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() @@ -295,7 +295,7 @@ func TestAddNat44AddressPoolError(t *testing.T) { } func TestAddNat44AddressPoolRetval(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() @@ -310,7 +310,7 @@ func TestAddNat44AddressPoolRetval(t *testing.T) { } func TestDelNat44AddressPool(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() firstIP := net.ParseIP("10.0.0.1").To4() @@ -331,7 +331,7 @@ func TestDelNat44AddressPool(t *testing.T) { } func TestAddNat44StaticMapping(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -373,7 +373,7 @@ func TestAddNat44StaticMapping(t *testing.T) { } func TestAddNat44StaticMappingAddrOnly(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -402,7 +402,7 @@ func TestAddNat44StaticMappingAddrOnly(t *testing.T) { } func TestAddNat44StaticMappingError(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object @@ -413,7 +413,7 @@ func TestAddNat44StaticMappingError(t *testing.T) { } func TestAddNat44StaticMappingRetval(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44AddDelStaticMappingReply{ @@ -425,7 +425,7 @@ func TestAddNat44StaticMappingRetval(t *testing.T) { } func TestDelNat44StaticMapping(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -454,7 +454,7 @@ func TestDelNat44StaticMapping(t *testing.T) { } func TestDelNat44StaticMappingAddrOnly(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() localIP := net.ParseIP("10.0.0.1").To4() @@ -483,7 +483,7 @@ func 
TestDelNat44StaticMappingAddrOnly(t *testing.T) { } func TestAddNat44StaticMappingLb(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() externalIP := net.ParseIP("10.0.0.1").To4() @@ -532,7 +532,7 @@ func TestAddNat44StaticMappingLb(t *testing.T) { } func TestAddNat44StaticMappingLbError(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object @@ -543,7 +543,7 @@ func TestAddNat44StaticMappingLbError(t *testing.T) { } func TestAddNat44StaticMappingLbRetval(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44AddDelLbStaticMappingReply{ @@ -555,7 +555,7 @@ func TestAddNat44StaticMappingLbRetval(t *testing.T) { } func TestDelNat44StaticMappingLb(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() externalIP := net.ParseIP("10.0.0.1").To4() @@ -604,7 +604,7 @@ func TestDelNat44StaticMappingLb(t *testing.T) { } func TestAddNat44IdentityMapping(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() address := net.ParseIP("10.0.0.1").To4() @@ -637,7 +637,7 @@ func TestAddNat44IdentityMapping(t *testing.T) { } func TestAddNat44IdentityMappingAddrOnly(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() // DataContext (IPAddress == nil and Port == 0 means it's address only) @@ -661,7 +661,7 @@ func TestAddNat44IdentityMappingAddrOnly(t *testing.T) { } func TestAddNat44IdentityMappingNoInterface(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() address := net.ParseIP("10.0.0.1").To4() @@ -690,7 +690,7 @@ func TestAddNat44IdentityMappingNoInterface(t *testing.T) { } func TestAddNat44IdentityMappingError(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() // Incorrect reply object @@ -701,7 +701,7 @@ func TestAddNat44IdentityMappingError(t *testing.T) { } func TestAddNat44IdentityMappingRetval(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() ctx.MockVpp.MockReply(&nat.Nat44AddDelIdentityMappingReply{ @@ -713,7 +713,7 @@ func TestAddNat44IdentityMappingRetval(t *testing.T) { } func TestDelNat44IdentityMapping(t *testing.T) { - ctx, natHandler := natTestSetup(t) + ctx, natHandler, _ := natTestSetup(t) defer ctx.TeardownTestCtx() address := net.ParseIP("10.0.0.1").To4() From e6e82f7f1755e9d7a76dcde12dc5968c95ab3773 Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 30 Jul 2018 12:53:52 +0200 Subject: [PATCH 065/174] rest support for STN Signed-off-by: Vladimir Lavor --- plugins/vpp/ifplugin/stn_config.go | 11 ++-- plugins/vpp/ifplugin/stn_config_test.go | 11 ++-- .../ifplugin/vppcalls/dump_stn_vppcalls.go | 53 ++++++++++++++++--- .../ifplugin/vppcalls/stn_vppcalls_test.go | 6 ++- 4 files changed, 62 insertions(+), 19 deletions(-) diff --git a/plugins/vpp/ifplugin/stn_config.go b/plugins/vpp/ifplugin/stn_config.go index b0a7ad424e..d6d23ef3f8 100644 --- a/plugins/vpp/ifplugin/stn_config.go +++ b/plugins/vpp/ifplugin/stn_config.go @@ -28,7 +28,6 @@ import ( "github.com/ligato/vpp-agent/idxvpp" 
"github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/govppmux" - "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" modelStn "github.com/ligato/vpp-agent/plugins/vpp/model/stn" @@ -90,7 +89,7 @@ func (plugin *StnConfigurator) Init(logger logging.PluginLogger, goVppMux govppm plugin.allIndexesSeq, plugin.unstoredIndexSeq = 1, 1 // VPP API handler - if plugin.stnHandler, err = vppcalls.NewStnVppHandler(plugin.vppChan, plugin.stopwatch); err != nil { + if plugin.stnHandler, err = vppcalls.NewStnVppHandler(plugin.vppChan, plugin.ifIndexes, plugin.log, plugin.stopwatch); err != nil { return err } @@ -201,13 +200,13 @@ func (plugin *StnConfigurator) Modify(ruleOld *modelStn.STN_Rule, ruleNew *model } // Dump STN rules configured on the VPP -func (plugin *StnConfigurator) Dump() ([]*stn.StnRulesDetails, error) { - rules, err := plugin.stnHandler.DumpStnRules() +func (plugin *StnConfigurator) Dump() (*vppcalls.StnDetails, error) { + stnDetails, err := plugin.stnHandler.DumpStnRules() if err != nil { return nil, err } - plugin.log.Debugf("found %d configured STN rules", len(rules)) - return rules, nil + plugin.log.Debugf("found %d configured STN rules", len(stnDetails.Rules)) + return stnDetails, nil } // Close GOVPP channel. diff --git a/plugins/vpp/ifplugin/stn_config_test.go b/plugins/vpp/ifplugin/stn_config_test.go index bda1b027ff..7626ac54b6 100644 --- a/plugins/vpp/ifplugin/stn_config_test.go +++ b/plugins/vpp/ifplugin/stn_config_test.go @@ -353,11 +353,12 @@ func TestStnConfiguratorDumpRule(t *testing.T) { // Test rule dump data, err := plugin.Dump() Expect(err).To(BeNil()) - Expect(data).ToNot(BeNil()) - Expect(data).To(HaveLen(1)) - Expect(data[0].SwIfIndex).To(BeEquivalentTo(1)) - Expect(data[0].IPAddress).To(BeEquivalentTo(net.ParseIP("10.0.0.1"))) - Expect(data[0].IsIP4).To(BeEquivalentTo(1)) + Expect(data.Rules).ToNot(BeNil()) + Expect(data.Rules).To(HaveLen(1)) + Expect(data.Rules[0].Interface).To(BeEquivalentTo("if1")) + Expect(data.Rules[0].IpAddress).To(BeEquivalentTo("10.0.0.1")) + Expect(data.Meta).ToNot(BeNil()) + Expect(data.Meta.IfNameToIdx[1]).To(BeEquivalentTo("if1")) } // Resolve new interface for STN diff --git a/plugins/vpp/ifplugin/vppcalls/dump_stn_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/dump_stn_vppcalls.go index a340414278..5c0ce290bd 100644 --- a/plugins/vpp/ifplugin/vppcalls/dump_stn_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/dump_stn_vppcalls.go @@ -17,18 +17,37 @@ package vppcalls import ( "time" - "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" + "net" + + stnapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" + "github.com/ligato/vpp-agent/plugins/vpp/model/stn" ) -func (handler *stnVppHandler) DumpStnRules() (rules []*stn.StnRulesDetails, err error) { +// StnDetails contains proto-modelled STN data and vpp specific metadata +type StnDetails struct { + Rules []*stn.STN_Rule + Meta *StnMeta +} + +// StnMeta contains map of interface name/index +type StnMeta struct { + IfNameToIdx map[uint32]string +} + +func (handler *stnVppHandler) DumpStnRules() (rules *StnDetails, err error) { defer func(t time.Time) { - handler.stopwatch.TimeLog(stn.StnRulesDump{}).LogTimeEntry(time.Since(t)) + handler.stopwatch.TimeLog(stnapi.StnRulesDump{}).LogTimeEntry(time.Since(t)) }(time.Now()) - req := &stn.StnRulesDump{} + var ruleList []*stn.STN_Rule + meta := &StnMeta{ + IfNameToIdx: make(map[uint32]string), + } 
+ + req := &stnapi.StnRulesDump{} reqCtx := handler.callsChannel.SendMultiRequest(req) for { - msg := &stn.StnRulesDetails{} + msg := &stnapi.StnRulesDetails{} stop, err := reqCtx.ReceiveReply(msg) if stop { break @@ -36,8 +55,28 @@ func (handler *stnVppHandler) DumpStnRules() (rules []*stn.StnRulesDetails, err if err != nil { return nil, err } - rules = append(rules, msg) + ifName, _, found := handler.ifIndexes.LookupName(msg.SwIfIndex) + if !found { + handler.log.Warnf("STN dump: name not found for interface %d", msg.SwIfIndex) + } + + var stnStrIP string + if msg.IsIP4 == 1 { + var stnIP net.IP = msg.IPAddress[12:] + stnStrIP = stnIP.To4().String() + } else { + var stnIP net.IP = msg.IPAddress + stnStrIP = stnIP.To16().String() + } + ruleList = append(ruleList, &stn.STN_Rule{ + IpAddress: stnStrIP, + Interface: ifName, + }) + meta.IfNameToIdx[msg.SwIfIndex] = ifName } - return rules, nil + return &StnDetails{ + Rules: ruleList, + Meta: meta, + }, nil } diff --git a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go index b9aaa3eca5..28f8110165 100644 --- a/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go +++ b/plugins/vpp/ifplugin/vppcalls/stn_vppcalls_test.go @@ -18,7 +18,10 @@ import ( "net" "testing" + "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/vpp-agent/idxvpp/nametoidx" "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/tests/vppcallmock" . "github.com/onsi/gomega" @@ -115,7 +118,8 @@ func TestDelStnRule(t *testing.T) { func stnTestSetup(t *testing.T) (*vppcallmock.TestCtx, vppcalls.StnVppAPI) { ctx := vppcallmock.SetupTestCtx(t) - stnHandler, err := vppcalls.NewStnVppHandler(ctx.MockChannel, nil) + ifIndexes := ifaceidx.NewSwIfIndex(nametoidx.NewNameToIdx(logrus.DefaultLogger(), "stn-if-idx", nil)) + stnHandler, err := vppcalls.NewStnVppHandler(ctx.MockChannel, ifIndexes, logrus.DefaultLogger(), nil) Expect(err).To(BeNil()) return ctx, stnHandler } From 3fa6adc5bc00fb23e31b5d30c563e05efa3d203d Mon Sep 17 00:00:00 2001 From: Vladimir Lavor Date: Mon, 30 Jul 2018 13:26:47 +0200 Subject: [PATCH 066/174] implemented dump for IPSec + rest support Signed-off-by: Vladimir Lavor --- plugins/rest/README.md | 24 +- plugins/rest/plugin_impl_rest.go | 32 +- plugins/rest/rest_handlers.go | 41 ++ plugins/rest/resturl/rest_url.go | 26 ++ plugins/vpp/ifplugin/data_resync.go | 27 +- plugins/vpp/ifplugin/data_resync_test.go | 7 +- plugins/vpp/ifplugin/vppcalls/api_vppcalls.go | 21 +- plugins/vpp/ipsecplugin/ipsec_config.go | 13 +- .../vpp/ipsecplugin/vppcalls/api_vppcalls.go | 30 +- .../vpp/ipsecplugin/vppcalls/dump_vppcalls.go | 385 ++++++++++++++++++ 10 files changed, 552 insertions(+), 54 deletions(-) create mode 100644 plugins/vpp/ipsecplugin/vppcalls/dump_vppcalls.go diff --git a/plugins/rest/README.md b/plugins/rest/README.md index b5c8859b74..286d7941a3 100644 --- a/plugins/rest/README.md +++ b/plugins/rest/README.md @@ -51,6 +51,25 @@ curl http://0.0.0.0:9191/vpp/dump/v1/bfd/sessions curl http://0.0.0.0:9191/vpp/dump/v1/bfd/authkeys ``` +**NAT** + +The REST plugin allows dumping the NAT44 global configuration, the DNAT configuration, or both together. +SNAT is currently not supported in the model, so a REST dump is not available for it either.
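These endpoints are backed by the NAT handler introduced in this patch. For orientation, a minimal Go sketch of the same dump done programmatically, assuming a connected GoVPP channel pair `vppChan`/`dumpChan`, a populated interface index map `ifIndexes` and a logger `log` (mirroring the wiring in `plugin_impl_rest.go` below):

```
// Hedged sketch, not part of the patch: construct the NAT vppcalls handler
// the same way the REST plugin does and dump the running NAT44 state.
natHandler, err := ifvppcalls.NewNatVppHandler(vppChan, dumpChan, ifIndexes, log, nil)
if err != nil {
	return err
}
// Nat44Dump returns the global NAT44 configuration together with the DNAT configs.
nat44, err := natHandler.Nat44Dump()
if err != nil {
	return err
}
log.Infof("NAT44 dump: %+v", nat44)
```

The endpoints themselves: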
+ +``` +curl http://0.0.0.0:9191/vpp/dump/v1/nat +curl http://0.0.0.0:9191/vpp/dump/v1/nat/global +curl http://0.0.0.0:9191/vpp/dump/v1/nat/dnat +``` + +**STN** + +The Steal-the-NIC (STN) feature exposes a single REST URI returning the list of configured STN rules. + +``` +curl http://0.0.0.0:9191/vpp/dump/v1/stn +``` + **L2 plugin** Support for bridge domains, FIBs and cross connects. It is also possible to get all @@ -72,10 +91,5 @@ curl http://0.0.0.0:9191/vpp/dump/v1/routes curl http://0.0.0.0:9191/arps ``` -Configure an IP ACL: -``` -curl -H "Content-Type: application/json" -X POST -d '' http://0.0.0.0:9191/interface/acl/ip -``` - ## Logging mechanism The REST API request is logged to stdout. The log contains VPPCLI command and VPPCLI response. It is searchable in elastic search using "VPPCLI". \ No newline at end of file diff --git a/plugins/rest/plugin_impl_rest.go b/plugins/rest/plugin_impl_rest.go index 7ffa98cf0c..2d8324b08d 100644 --- a/plugins/rest/plugin_impl_rest.go +++ b/plugins/rest/plugin_impl_rest.go @@ -28,6 +28,7 @@ import ( "github.com/ligato/vpp-agent/plugins/vpp" aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/vpp/ipsecplugin/vppcalls" l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" l3vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" ) @@ -52,13 +53,16 @@ type Plugin struct { dumpChan api.Channel // Handlers - aclHandler aclvppcalls.AclVppRead - ifHandler ifvppcalls.IfVppRead - bfdHandler ifvppcalls.BfdVppRead - bdHandler l2vppcalls.BridgeDomainVppRead - fibHandler l2vppcalls.FibVppRead - xcHandler l2vppcalls.XConnectVppRead - rtHandler l3vppcalls.RouteVppRead + aclHandler aclvppcalls.AclVppRead + ifHandler ifvppcalls.IfVppRead + bfdHandler ifvppcalls.BfdVppRead + natHandler ifvppcalls.NatVppRead + stnHandler ifvppcalls.StnVppRead + ipSecHandler vppcalls.IPSecVPPRead + bdHandler l2vppcalls.BridgeDomainVppRead + fibHandler l2vppcalls.FibVppRead + xcHandler l2vppcalls.XConnectVppRead + rtHandler l3vppcalls.RouteVppRead sync.Mutex } @@ -92,6 +96,7 @@ func (plugin *Plugin) Init() (err error) { // Indexes ifIndexes := plugin.VPP.GetSwIfIndexes() bdIndexes := plugin.VPP.GetBDIndexes() + spdIndexes := plugin.VPP.GetIPSecSPDIndexes() // Initialize handlers if plugin.aclHandler, err = aclvppcalls.NewAclVppHandler(plugin.vppChan, plugin.dumpChan, nil); err != nil { @@ -103,6 +108,15 @@ func (plugin *Plugin) Init() (err error) { if plugin.bfdHandler, err = ifvppcalls.NewBfdVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != nil { return err } + if plugin.natHandler, err = ifvppcalls.NewNatVppHandler(plugin.vppChan, plugin.dumpChan, ifIndexes, plugin.Log, nil); err != nil { + return err + } + if plugin.stnHandler, err = ifvppcalls.NewStnVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != nil { + return err + } + if plugin.ipSecHandler, err = vppcalls.NewIPsecVppHandler(plugin.vppChan, ifIndexes, spdIndexes, plugin.Log, nil); err != nil { + return err + } if plugin.bdHandler, err = l2vppcalls.NewBridgeDomainVppHandler(plugin.vppChan, ifIndexes, plugin.Log, nil); err != nil { return err } @@ -131,7 +145,6 @@ func (plugin *Plugin) Init() (err error) { {Name: "Bridge domain IDs", Path: resturl.BdId}, {Name: "L2Fibs", Path: resturl.Fib}, {Name: "XConnectorPairs", Path: resturl.Xc}, - {Name: "Static routes", Path: resturl.Routes}, {Name: "ARPs", Path: "/arps"}, {Name: "Telemetry", Path: "/telemetry"}, @@
-146,6 +159,9 @@ func (plugin *Plugin) AfterInit() (err error) { plugin.registerAccessListHandlers() plugin.registerInterfaceHandlers() plugin.registerBfdHandlers() + plugin.registerNatHandlers() + plugin.registerStnHandlers() + plugin.registerIPSecHandlers() plugin.registerL2Handlers() plugin.registerL3Handlers() diff --git a/plugins/rest/rest_handlers.go b/plugins/rest/rest_handlers.go index 5a40653a3f..f0d7aae67f 100644 --- a/plugins/rest/rest_handlers.go +++ b/plugins/rest/rest_handlers.go @@ -80,6 +80,7 @@ func (plugin *Plugin) registerInterfaceHandlers() { }) } +// Registers BFD REST handlers func (plugin *Plugin) registerBfdHandlers() { // GET BFD configuration plugin.registerHTTPHandler(resturl.BfdUrl, GET, func() (interface{}, error) { @@ -95,6 +96,46 @@ func (plugin *Plugin) registerBfdHandlers() { }) } +// Registers NAT REST handlers +func (plugin *Plugin) registerNatHandlers() { + // GET NAT configuration + plugin.registerHTTPHandler(resturl.NatUrl, GET, func() (interface{}, error) { + return plugin.natHandler.Nat44Dump() + }) + // GET NAT global config + plugin.registerHTTPHandler(resturl.NatGlobal, GET, func() (interface{}, error) { + return plugin.natHandler.Nat44GlobalConfigDump() + }) + // GET DNAT config + plugin.registerHTTPHandler(resturl.NatDNat, GET, func() (interface{}, error) { + return plugin.natHandler.NAT44DNatDump() + }) +} + +// Registers STN REST handlers +func (plugin *Plugin) registerStnHandlers() { + // GET STN configuration + plugin.registerHTTPHandler(resturl.StnUrl, GET, func() (interface{}, error) { + return plugin.stnHandler.DumpStnRules() + }) +} + +// Registers IPSec REST handlers +func (plugin *Plugin) registerIPSecHandlers() { + // GET IPSec SPD configuration + plugin.registerHTTPHandler(resturl.IPSecSpd, GET, func() (interface{}, error) { + return plugin.ipSecHandler.DumpIPSecSPD() + }) + // GET IPSec SA configuration + plugin.registerHTTPHandler(resturl.IPSecSa, GET, func() (interface{}, error) { + return plugin.ipSecHandler.DumpIPSecSA() + }) + // GET IPSec Tunnel configuration + plugin.registerHTTPHandler(resturl.IPSecTnIf, GET, func() (interface{}, error) { + return plugin.ipSecHandler.DumpIPSecTunnelInterfaces() + }) +} + // Registers L2 plugin REST handlers func (plugin *Plugin) registerL2Handlers() { // GET bridge domain IDs diff --git a/plugins/rest/resturl/rest_url.go b/plugins/rest/resturl/rest_url.go index 96728185af..84683d07b6 100644 --- a/plugins/rest/resturl/rest_url.go +++ b/plugins/rest/resturl/rest_url.go @@ -50,6 +50,32 @@ const ( VxLan = "/vpp/dump/v1/interfaces/vxlan" ) +// NAT REST URLs +const ( + // NatUrl is the REST path of the NAT configuration + NatUrl = "/vpp/dump/v1/nat" + // NatGlobal is the REST path of the global NAT config + NatGlobal = "/vpp/dump/v1/nat/global" + // NatDNat is the REST path of the DNAT configurations + NatDNat = "/vpp/dump/v1/nat/dnat" +) + +// STN REST URL +const ( + // StnUrl is the REST path of the STN configuration + StnUrl = "/vpp/dump/v1/stn" +) + +// IPSec REST URLs +const ( + // IPSecSpd is the REST path of an IPSec SPD + IPSecSpd = "/vpp/dump/v1/ipsec/spd" + // IPSecSa is the REST path of an IPSec SA + IPSecSa = "/vpp/dump/v1/ipsec/sa" + // IPSecTnIf is the REST path of IPSec tunnels + IPSecTnIf = "/vpp/dump/v1/ipsec/tunnel" +) + // L2 plugin const ( // restBd is rest bridge domain path diff --git a/plugins/vpp/ifplugin/data_resync.go b/plugins/vpp/ifplugin/data_resync.go index 72b4f5a378..027e62c772 100644 --- a/plugins/vpp/ifplugin/data_resync.go +++ b/plugins/vpp/ifplugin/data_resync.go @@ -376,42 +376,35 @@ func (plugin
*StnConfigurator) Resync(nbStnRules []*stn.STN_Rule) error { plugin.clearMapping() // Dump existing STN Rules - vppStnRules, err := plugin.Dump() + vppStnDetails, err := plugin.Dump() if err != nil { return err } // Correlate configuration, and remove obsolete rules STN rules var wasErr error - for _, vppStnRule := range vppStnRules { + for _, vppStnRule := range vppStnDetails.Rules { // Parse parameters var vppStnIP net.IP var vppStnIPStr string - if vppStnRule.IsIP4 == 1 { - vppStnIP = vppStnRule.IPAddress[:4] - } else { - vppStnIP = vppStnRule.IPAddress - } - vppStnIPStr = vppStnIP.String() - - vppStnIfName, _, found := plugin.ifIndexes.LookupName(vppStnRule.SwIfIndex) + vppStnIfIdx, _, found := plugin.ifIndexes.LookupIdx(vppStnRule.Interface) if !found { // The rule is attached to non existing interface but it can be removed. If there is a similar // rule in NB config, it will be configured (or cached) - if err := plugin.stnHandler.DelStnRule(vppStnRule.SwIfIndex, &vppStnIP); err != nil { + if err := plugin.stnHandler.DelStnRule(vppStnIfIdx, &vppStnIP); err != nil { plugin.log.Error(err) wasErr = err } plugin.log.Debugf("RESYNC STN: rule IP: %v ifIdx: %v removed due to missing interface, will be reconfigured if needed", - vppStnIPStr, vppStnRule.SwIfIndex) + vppStnIPStr, vppStnIfIdx) continue } // Look for equal rule in NB configuration var match bool for _, nbStnRule := range nbStnRules { - if nbStnRule.IpAddress == vppStnIPStr && nbStnRule.Interface == vppStnIfName { + if nbStnRule.IpAddress == vppStnIPStr && nbStnRule.Interface == vppStnRule.Interface { // Register existing rule plugin.indexSTNRule(nbStnRule, false) match = true @@ -421,11 +414,11 @@ func (plugin *StnConfigurator) Resync(nbStnRules []*stn.STN_Rule) error { // If STN rule does not exist, it is obsolete if !match { - if err := plugin.stnHandler.DelStnRule(vppStnRule.SwIfIndex, &vppStnIP); err != nil { + if err := plugin.stnHandler.DelStnRule(vppStnIfIdx, &vppStnIP); err != nil { plugin.log.Error(err) wasErr = err } - plugin.log.Debugf("RESYNC STN: rule IP: %v ifName: %v removed as obsolete", vppStnIPStr, vppStnIfName) + plugin.log.Debugf("RESYNC STN: rule IP: %v ifName: %v removed as obsolete", vppStnIPStr, vppStnRule.Interface) } } @@ -452,7 +445,7 @@ func (plugin *NatConfigurator) ResyncNatGlobal(nbGlobal *nat.Nat44Global) error // Re-initialize cache plugin.clearMapping() - vppNatGlobal, err := plugin.natHandler.Nat44GlobalConfigDump(plugin.ifIndexes) + vppNatGlobal, err := plugin.natHandler.Nat44GlobalConfigDump() if err != nil { return fmt.Errorf("failed to dump NAT44 global config: %v", err) } @@ -471,7 +464,7 @@ func (plugin *NatConfigurator) ResyncSNat(sNatConf []*nat.Nat44SNat_SNatConfig) func (plugin *NatConfigurator) ResyncDNat(nbDNatConfig []*nat.Nat44DNat_DNatConfig) error { plugin.log.Debug("RESYNC DNAT config.") - vppDNatCfg, err := plugin.natHandler.NAT44DNatDump(plugin.ifIndexes) + vppDNatCfg, err := plugin.natHandler.NAT44DNatDump() if err != nil { return fmt.Errorf("failed to dump DNAT config: %v", err) } diff --git a/plugins/vpp/ifplugin/data_resync_test.go b/plugins/vpp/ifplugin/data_resync_test.go index 25e65eb827..9aba4324a9 100644 --- a/plugins/vpp/ifplugin/data_resync_test.go +++ b/plugins/vpp/ifplugin/data_resync_test.go @@ -1018,11 +1018,6 @@ func TestDataResyncResyncEchoFunction(t *testing.T) { func TestDataResyncResyncStn(t *testing.T) { // Setup plugin, conn := stnConfiguratorTestInitialization(t, []*vppReplyMock{ - { - Name: (&stnApi.StnRulesDump{}).GetMessageName(), - Ping: true, - 
Message: &stnApi.StnRulesDetails{}, - }, { Name: (&stnApi.StnAddDelRule{}).GetMessageName(), Message: &stnApi.StnAddDelRuleReply{}, @@ -1037,7 +1032,7 @@ func TestDataResyncResyncStn(t *testing.T) { { RuleName: "test", Interface: "if0", - IpAddress: "192.168.0.1/24", + IpAddress: "192.168.0.1", }, } diff --git a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go index 0712d360bb..2db4262247 100644 --- a/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ifplugin/vppcalls/api_vppcalls.go @@ -22,7 +22,6 @@ import ( "github.com/ligato/cn-infra/logging/measure" "github.com/ligato/vpp-agent/idxvpp" bfd_api "github.com/ligato/vpp-agent/plugins/vpp/binapi/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/binapi/stn" "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" @@ -191,12 +190,14 @@ type NatVppWrite interface { // NatVppRead provides read methods for NAT type NatVppRead interface { + // Nat44Dump returns global NAT configuration together with the DNAT configs + Nat44Dump() (*Nat44Details, error) // Nat44GlobalConfigDump returns global config in NB format - Nat44GlobalConfigDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44Global, error) + Nat44GlobalConfigDump() (*nat.Nat44Global, error) // NAT44NatDump dumps all types of mappings, sorts it according to tag (DNAT label) and creates a set of DNAT configurations - NAT44DNatDump(swIfIndices ifaceidx.SwIfIndex) (*nat.Nat44DNat, error) + NAT44DNatDump() (*nat.Nat44DNat, error) // Nat44InterfaceDump returns a list of interfaces enabled for NAT44 - Nat44InterfaceDump(swIfIndices ifaceidx.SwIfIndex) (interfaces []*nat.Nat44Global_NatInterface, err error) + Nat44InterfaceDump() (interfaces []*nat.Nat44Global_NatInterface, err error) } // StnVppAPI provides methods for managing STN @@ -216,7 +217,7 @@ type StnVppWrite interface { // StnVppRead provides read methods for STN type StnVppRead interface { // DumpStnRules returns a list of all STN rules configured on the VPP - DumpStnRules() (rules []*stn.StnRulesDetails, err error) + DumpStnRules() (rules *StnDetails, err error) } // ifVppHandler is accessor for interface-related vppcalls methods @@ -239,13 +240,16 @@ type natVppHandler struct { stopwatch *measure.Stopwatch callsChannel api.Channel dumpChannel api.Channel + ifIndexes ifaceidx.SwIfIndex log logging.Logger } // stnVppHandler is accessor for STN-related vppcalls methods type stnVppHandler struct { stopwatch *measure.Stopwatch + ifIndexes ifaceidx.SwIfIndex callsChannel api.Channel + log logging.Logger } // NewIfVppHandler creates new instance of interface vppcalls handler @@ -278,11 +282,12 @@ func NewBfdVppHandler(callsChan api.Channel, ifIndexes ifaceidx.SwIfIndex, log l } // NewNatVppHandler creates new instance of NAT vppcalls handler -func NewNatVppHandler(callsChan, dumpChan api.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*natVppHandler, error) { +func NewNatVppHandler(callsChan, dumpChan api.Channel, ifIndexes ifaceidx.SwIfIndex, log logging.Logger, stopwatch *measure.Stopwatch) (*natVppHandler, error) { handler := &natVppHandler{ callsChannel: callsChan, dumpChannel: dumpChan, stopwatch: stopwatch, + ifIndexes: ifIndexes, log: log, } if err := handler.callsChannel.CheckMessageCompatibility(NatMessages...); err != nil { @@ -293,10 +298,12 @@ func NewNatVppHandler(callsChan, dumpChan api.Channel, log logging.Logger, stopw } // NewStnVppHandler creates
new instance of STN vppcalls handler -func NewStnVppHandler(callsChan api.Channel, stopwatch *measure.Stopwatch) (*stnVppHandler, error) { +func NewStnVppHandler(callsChan api.Channel, ifIndexes ifaceidx.SwIfIndex, log logging.Logger, stopwatch *measure.Stopwatch) (*stnVppHandler, error) { handler := &stnVppHandler{ callsChannel: callsChan, + ifIndexes: ifIndexes, stopwatch: stopwatch, + log: log, } if err := handler.callsChannel.CheckMessageCompatibility(StnMessages...); err != nil { return nil, err diff --git a/plugins/vpp/ipsecplugin/ipsec_config.go b/plugins/vpp/ipsecplugin/ipsec_config.go index a808ce3d53..695bc2c879 100644 --- a/plugins/vpp/ipsecplugin/ipsec_config.go +++ b/plugins/vpp/ipsecplugin/ipsec_config.go @@ -30,6 +30,7 @@ import ( iface_vppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" "github.com/ligato/vpp-agent/plugins/vpp/ipsecplugin/ipsecidx" "github.com/ligato/vpp-agent/plugins/vpp/ipsecplugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" ) @@ -63,7 +64,7 @@ type IPSecConfigurator struct { // VPP API handlers ifHandler iface_vppcalls.IfVppAPI - ipSecHandler vppcalls.IPsecVppAPI + ipSecHandler vppcalls.IPSecVppAPI // Timer used to measure and store time stopwatch *measure.Stopwatch @@ -99,7 +100,7 @@ func (plugin *IPSecConfigurator) Init(logger logging.PluginLogger, goVppMux govp if plugin.ifHandler, err = iface_vppcalls.NewIfVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch); err != nil { return err } - if plugin.ipSecHandler, err = vppcalls.NewIPsecVppHandler(plugin.vppCh, plugin.log, plugin.stopwatch); err != nil { + if plugin.ipSecHandler, err = vppcalls.NewIPsecVppHandler(plugin.vppCh, plugin.ifIndexes, plugin.spdIndexes, plugin.log, plugin.stopwatch); err != nil { return err } @@ -345,7 +346,13 @@ func (plugin *IPSecConfigurator) ConfigureTunnel(tunnel *ipsec.TunnelInterfaces_ return err } - plugin.ifIndexes.RegisterName(tunnel.Name, ifIdx, nil) + // Register with necessary metadata info + plugin.ifIndexes.RegisterName(tunnel.Name, ifIdx, &interfaces.Interfaces_Interface{ + Name: tunnel.Name, + Enabled: tunnel.Enabled, + IpAddresses: tunnel.IpAddresses, + Vrf: tunnel.Vrf, + }) plugin.log.Infof("Registered Tunnel %v (%d)", tunnel.Name, ifIdx) if err := plugin.ifHandler.SetInterfaceVRF(ifIdx, tunnel.Vrf); err != nil { diff --git a/plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go b/plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go index 0b8d75a374..6d1a3ae992 100644 --- a/plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go +++ b/plugins/vpp/ipsecplugin/vppcalls/api_vppcalls.go @@ -18,17 +18,19 @@ import ( govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/measure" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" + "github.com/ligato/vpp-agent/plugins/vpp/ipsecplugin/ipsecidx" "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" ) // IPsecVppAPI provides methods for creating and managing of a IPsec configuration -type IPsecVppAPI interface { - IPsecVppWrite - IPsecVPPRead +type IPSecVppAPI interface { + IPSecVppWrite + IPSecVPPRead } // IPsecVppWrite provides write methods for IPsec -type IPsecVppWrite interface { +type IPSecVppWrite interface { // AddTunnelInterface adds tunnel interface AddTunnelInterface(tunnel *ipsec.TunnelInterfaces_Tunnel) (uint32, error) // DelTunnelInterface removes tunnel interface @@ -51,23 +53,35 @@ type IPsecVppWrite interface { DelSAEntry(saID uint32, sa
*ipsec.SecurityAssociations_SA) error } -// IPsecVppWrite provides read methods for IPsec -type IPsecVPPRead interface { - // TODO define dump methods +// IPSecVPPRead provides read methods for IPSec +type IPSecVPPRead interface { + // DumpIPSecSPD returns a list of IPSec security policy databases + DumpIPSecSPD() (spdList []*IPSecSpdDetails, err error) + // DumpIPSecSA returns a list of configured security associations + DumpIPSecSA() (saList []*IPSecSaDetails, err error) + // DumpIPSecSAWithIndex returns a security association with provided index + DumpIPSecSAWithIndex(saID uint32) (saList []*IPSecSaDetails, err error) + // DumpIPSecTunnelInterfaces returns a list of configured IPSec tunnel interfaces + DumpIPSecTunnelInterfaces() (tun *IPSecTunnelInterfaceDetails, err error) } // ipSecVppHandler is accessor for IPsec-related vppcalls methods type ipSecVppHandler struct { stopwatch *measure.Stopwatch callsChannel govppapi.Channel + ifIndexes ifaceidx.SwIfIndex + spdIndexes ipsecidx.SPDIndex // TODO workaround in order to be able to dump at least the SPDs the configurator knows about log logging.Logger } // NewIPsecVppHandler creates new instance of IPsec vppcalls handler -func NewIPsecVppHandler(callsChan govppapi.Channel, log logging.Logger, stopwatch *measure.Stopwatch) (*ipSecVppHandler, error) { +func NewIPsecVppHandler(callsChan govppapi.Channel, ifIndexes ifaceidx.SwIfIndex, spdIndexes ipsecidx.SPDIndex, + log logging.Logger, stopwatch *measure.Stopwatch) (*ipSecVppHandler, error) { handler := &ipSecVppHandler{ callsChannel: callsChan, stopwatch: stopwatch, + ifIndexes: ifIndexes, + spdIndexes: spdIndexes, log: log, } if err := handler.callsChannel.CheckMessageCompatibility(IPSecMessages...); err != nil { diff --git a/plugins/vpp/ipsecplugin/vppcalls/dump_vppcalls.go b/plugins/vpp/ipsecplugin/vppcalls/dump_vppcalls.go new file mode 100644 index 0000000000..b927d0dd38 --- /dev/null +++ b/plugins/vpp/ipsecplugin/vppcalls/dump_vppcalls.go @@ -0,0 +1,385 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
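For context, a hedged sketch of a consumer driving the new read API declared above (the constructor signature matches the change in this patch; `vppChan`, `ifIndexes`, `spdIndexes` and `log` are assumed to be available, as in the configurator's Init):

```
// Sketch only: wire the IPSec vppcalls handler and read back SPDs and SAs
// through the IPSecVPPRead methods declared above.
ipSecHandler, err := vppcalls.NewIPsecVppHandler(vppChan, ifIndexes, spdIndexes, log, nil)
if err != nil {
	return err
}
spds, err := ipSecHandler.DumpIPSecSPD()
if err != nil {
	return err
}
for _, spdDetails := range spds {
	log.Infof("SPD with %d policy entries", len(spdDetails.Spd.PolicyEntries))
}
sas, err := ipSecHandler.DumpIPSecSA()
if err != nil {
	return err
}
log.Infof("dumped %d security associations", len(sas))
```

The dump implementation backing these methods follows in the new `dump_vppcalls.go`: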
+ +package vppcalls + +import ( + "bytes" + "net" + "strconv" + "time" + + ipsecapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/ipsec" + "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" + "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" +) + +// IPSecSaDetails holds a security association with VPP metadata +type IPSecSaDetails struct { + Sa *ipsec.SecurityAssociations_SA + Meta *IPSecSaMeta +} + +// IPSecSaMeta contains all VPP-specific metadata +type IPSecSaMeta struct { + SaID uint32 + Interface string + IfIdx uint32 + CryptoKeyLen uint8 + IntegKeyLen uint8 + Salt uint32 + SeqOutbound uint64 + LastSeqInbound uint64 + ReplayWindow uint64 + TotalDataSize uint64 +} + +func (handler *ipSecVppHandler) DumpIPSecSA() (saList []*IPSecSaDetails, err error) { + return handler.DumpIPSecSAWithIndex(^uint32(0)) // Get everything +} + +func (handler *ipSecVppHandler) DumpIPSecSAWithIndex(saID uint32) (saList []*IPSecSaDetails, err error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(ipsecapi.IpsecSaDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + saDetails, err := handler.dumpSecurityAssociations(saID) + if err != nil { + return nil, err + } + + for _, saData := range saDetails { + // Skip tunnel interfaces + if saData.SwIfIndex != ^uint32(0) { + continue + } + + // Addresses + var tunnelSrcAddrStr, tunnelDstAddrStr string + if uintToBool(saData.IsTunnelIP6) { + var tunnelSrcAddr, tunnelDstAddr net.IP = saData.TunnelSrcAddr, saData.TunnelDstAddr + tunnelSrcAddrStr, tunnelDstAddrStr = tunnelSrcAddr.String(), tunnelDstAddr.String() + } else { + var tunnelSrcAddr, tunnelDstAddr net.IP = saData.TunnelSrcAddr[:4], saData.TunnelDstAddr[:4] + tunnelSrcAddrStr, tunnelDstAddrStr = tunnelSrcAddr.String(), tunnelDstAddr.String() + } + + sa := &ipsec.SecurityAssociations_SA{ + Spi: saData.Spi, + Protocol: getSaProto(saData.Protocol), + CryptoAlg: getCryptoAlg(saData.CryptoAlg), + CryptoKey: string(bytes.SplitN(saData.CryptoKey, []byte{0x00}, 2)[0]), + IntegAlg: getIntegAlg(saData.IntegAlg), + IntegKey: string(bytes.SplitN(saData.IntegKey, []byte{0x00}, 2)[0]), + UseEsn: uintToBool(saData.UseEsn), + UseAntiReplay: uintToBool(saData.UseAntiReplay), + TunnelSrcAddr: tunnelSrcAddrStr, + TunnelDstAddr: tunnelDstAddrStr, + EnableUdpEncap: uintToBool(saData.UDPEncap), + } + meta := &IPSecSaMeta{ + SaID: saData.SaID, + IfIdx: saData.SwIfIndex, + CryptoKeyLen: saData.CryptoKeyLen, + IntegKeyLen: saData.IntegKeyLen, + Salt: saData.Salt, + SeqOutbound: saData.SeqOutbound, + LastSeqInbound: saData.LastSeqInbound, + ReplayWindow: saData.ReplayWindow, + TotalDataSize: saData.TotalDataSize, + } + saList = append(saList, &IPSecSaDetails{ + Sa: sa, + Meta: meta, + }) + } + + return saList, nil +} + +// IPSecTunnelInterfaceDetails holds a list of tunnel interfaces with name/index map as metadata +type IPSecTunnelInterfaceDetails struct { + Tunnels []*ipsec.TunnelInterfaces_Tunnel + Meta *IPSecTunnelMeta +} + +// IPSecTunnelMeta contains a map of interface index/name pairs +type IPSecTunnelMeta struct { + IfNameToIdx map[uint32]string +} + +func (handler *ipSecVppHandler) DumpIPSecTunnelInterfaces() (tun *IPSecTunnelInterfaceDetails, err error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(ipsecapi.IpsecSaDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + var tunnels []*ipsec.TunnelInterfaces_Tunnel + meta := &IPSecTunnelMeta{ + IfNameToIdx: make(map[uint32]string), + } + saDetails, err := handler.dumpSecurityAssociations(^uint32(0)) + if err != nil { + return nil, err + } + + for _, saData
:= range saDetails { + // Skip non-tunnel security associations + if saData.SwIfIndex == ^uint32(0) { + continue + } + + // Interface + var ifName string + var ifData *interfaces.Interfaces_Interface + if saData.SwIfIndex != ^uint32(1) { + var found bool + ifName, ifData, found = handler.ifIndexes.LookupName(saData.SwIfIndex) + if !found { + handler.log.Warnf("IPSec SA dump: interface name not found for %d", saData.SwIfIndex) + continue + } + if ifData == nil { + handler.log.Warnf("IPSec SA dump: interface %s has no metadata", ifName) + continue + } + } + + // Addresses + var tunnelSrcAddrStr, tunnelDstAddrStr string + if uintToBool(saData.IsTunnelIP6) { + var tunnelSrcAddr, tunnelDstAddr net.IP = saData.TunnelSrcAddr, saData.TunnelDstAddr + tunnelSrcAddrStr, tunnelDstAddrStr = tunnelSrcAddr.String(), tunnelDstAddr.String() + } else { + var tunnelSrcAddr, tunnelDstAddr net.IP = saData.TunnelSrcAddr[:4], saData.TunnelDstAddr[:4] + tunnelSrcAddrStr, tunnelDstAddrStr = tunnelSrcAddr.String(), tunnelDstAddr.String() + } + + // Prepare tunnel interface data + tunnel := &ipsec.TunnelInterfaces_Tunnel{ + Name: ifName, + Esn: uintToBool(saData.UseEsn), + AntiReplay: uintToBool(saData.UseAntiReplay), + LocalIp: tunnelSrcAddrStr, + RemoteIp: tunnelDstAddrStr, + LocalSpi: saData.Spi, + RemoteSpi: saData.Spi, + CryptoAlg: getCryptoAlg(saData.CryptoAlg), + IntegAlg: getIntegAlg(saData.IntegAlg), + Enabled: ifData.Enabled, + IpAddresses: ifData.IpAddresses, + Vrf: ifData.Vrf, + } + tunnels = append(tunnels, tunnel) + + // Put metadata entry + meta.IfNameToIdx[saData.SwIfIndex] = ifName + } + + return &IPSecTunnelInterfaceDetails{ + Tunnels: tunnels, + Meta: meta, + }, nil +} + +// IPSecSpdDetails represents IPSec policy databases with particular metadata +type IPSecSpdDetails struct { + Spd *ipsec.SecurityPolicyDatabases_SPD + Meta *IPSecSpdMeta +} + +// IPSecSpdMeta is a map where the key is a generated security association name and the value is an SpdMeta object +type IPSecSpdMeta struct { + SpdMeta map[string]*SpdMeta // SA-generated name is a key +} + +// SpdMeta holds VPP-specific data related to SPD +type SpdMeta struct { + SaID uint32 + SpdID uint32 + Policy uint8 + Bytes uint64 + Packets uint64 +} + +func (handler *ipSecVppHandler) DumpIPSecSPD() (spdList []*IPSecSpdDetails, err error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(ipsecapi.IpsecSpdDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + metadata := &IPSecSpdMeta{ + SpdMeta: make(map[string]*SpdMeta), + } + + // TODO IPSec SPD dump request requires SPD ID, otherwise it returns nothing. There is currently no way + // to dump all SPDs available on the VPP, so let's dump at least the ones the configurator knows about.
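+	// Illustrative example (assumed values): for an SPD registered under
+	// index 1, the loop below sends &ipsecapi.IpsecSpdDump{SpdID: 1, SaID: 0xffffffff};
+	// the all-ones SaID is used here as a wildcard so that policy entries for
+	// every security association within that SPD are returned.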
+ for _, spdName := range handler.spdIndexes.GetMapping().ListNames() { + spdIdx, _, found := handler.spdIndexes.LookupIdx(spdName) + if !found { + // Shouldn't happen, call the police or something + continue + } + spd := &ipsec.SecurityPolicyDatabases_SPD{} + + // Prepare VPP binapi request + req := &ipsecapi.IpsecSpdDump{ + SpdID: spdIdx, + SaID: 0xffffffff, + } + requestCtx := handler.callsChannel.SendMultiRequest(req) + + // Policy association index, used to generate SA name + var paIdx int + + for { + spdDetails := &ipsecapi.IpsecSpdDetails{} + stop, err := requestCtx.ReceiveReply(spdDetails) + if stop { + break + } + if err != nil { + return nil, err + } + + // Security association name, to distinguish metadata + saGenName := "sa-generated-" + strconv.Itoa(paIdx) + paIdx++ + + // Addresses + var remoteStartAddrStr, remoteStopAddrStr, localStartAddrStr, localStopAddrStr string + if uintToBool(spdDetails.IsIpv6) { + var remoteStartAddr, remoteStopAddr net.IP = spdDetails.RemoteStartAddr, spdDetails.RemoteStopAddr + remoteStartAddrStr, remoteStopAddrStr = remoteStartAddr.String(), remoteStopAddr.String() + var localStartAddr, localStopAddr net.IP = spdDetails.LocalStartAddr, spdDetails.LocalStopAddr + localStartAddrStr, localStopAddrStr = localStartAddr.String(), localStopAddr.String() + } else { + var remoteStartAddr, remoteStopAddr net.IP = spdDetails.RemoteStartAddr[:4], spdDetails.RemoteStopAddr[:4] + remoteStartAddrStr, remoteStopAddrStr = remoteStartAddr.String(), remoteStopAddr.String() + var localStartAddr, localStopAddr net.IP = spdDetails.LocalStartAddr[:4], spdDetails.LocalStopAddr[:4] + localStartAddrStr, localStopAddrStr = localStartAddr.String(), localStopAddr.String() + } + + // Prepare policy entry and put to the SPD + policyEntry := &ipsec.SecurityPolicyDatabases_SPD_PolicyEntry{ + Sa: saGenName, + Priority: spdDetails.Priority, + IsOutbound: uintToBool(spdDetails.IsOutbound), + RemoteAddrStart: remoteStartAddrStr, + RemoteAddrStop: remoteStopAddrStr, + LocalAddrStart: localStartAddrStr, + LocalAddrStop: localStopAddrStr, + Protocol: uint32(spdDetails.Protocol), + RemotePortStart: uint32(spdDetails.RemoteStartPort), + RemotePortStop: uint32(spdDetails.RemoteStopPort), + LocalPortStart: uint32(spdDetails.LocalStartPort), + LocalPortStop: uint32(spdDetails.LocalStopPort), + } + spd.PolicyEntries = append(spd.PolicyEntries, policyEntry) + + // Prepare meta and put to the metadata map + meta := &SpdMeta{ + SpdID: spdDetails.SpdID, + SaID: spdDetails.SaID, + Policy: spdDetails.Policy, + Bytes: spdDetails.Bytes, + Packets: spdDetails.Packets, + } + metadata.SpdMeta[saGenName] = meta + } + // Store SPD in list + spdList = append(spdList, &IPSecSpdDetails{ + Spd: spd, + Meta: metadata, + }) + } + + return spdList, nil +} + +// Get all security associations (also used for tunnel interfaces) in binary API format +func (handler *ipSecVppHandler) dumpSecurityAssociations(saID uint32) (saList []*ipsecapi.IpsecSaDetails, err error) { + defer func(t time.Time) { + handler.stopwatch.TimeLog(ipsecapi.IpsecSaDump{}).LogTimeEntry(time.Since(t)) + }(time.Now()) + + req := &ipsecapi.IpsecSaDump{ + SaID: saID, + } + requestCtx := handler.callsChannel.SendMultiRequest(req) + + for { + saDetails := &ipsecapi.IpsecSaDetails{} + stop, err := requestCtx.ReceiveReply(saDetails) + if stop { + break + } + if err != nil { + return nil, err + } + + saList = append(saList, saDetails) + } + + return saList, nil + +} + +func getCryptoAlg(alg uint8) ipsec.CryptoAlgorithm { + switch alg { + case 0: + return
ipsec.CryptoAlgorithm_NONE_CRYPTO + case 1: + return ipsec.CryptoAlgorithm_AES_CBC_128 + case 2: + return ipsec.CryptoAlgorithm_AES_CBC_192 + case 3: + return ipsec.CryptoAlgorithm_AES_CBC_256 + default: + return ipsec.CryptoAlgorithm_NONE_CRYPTO // As default + } +} + +func getIntegAlg(alg uint8) ipsec.IntegAlgorithm { + switch alg { + case 0: + return ipsec.IntegAlgorithm_NONE_INTEG + case 1: + return ipsec.IntegAlgorithm_MD5_96 + case 2: + return ipsec.IntegAlgorithm_SHA1_96 + case 3: + return ipsec.IntegAlgorithm_SHA_256_96 + case 4: + return ipsec.IntegAlgorithm_SHA_256_128 + case 5: + return ipsec.IntegAlgorithm_SHA_384_192 + case 6: + return ipsec.IntegAlgorithm_SHA_512_256 + default: + return ipsec.IntegAlgorithm_NONE_INTEG // As default + } +} + +func getSaProto(protocol uint8) ipsec.SecurityAssociations_SA_IPSecProtocol { + if protocol == 0 { + return ipsec.SecurityAssociations_SA_AH + } + return ipsec.SecurityAssociations_SA_ESP +} + +func uintToBool(input uint8) bool { + if input == 1 { + return true + } + return false +} From 45de5a39fd72426bb5d211e2f67ee0dcf595d2c2 Mon Sep 17 00:00:00 2001 From: Ondrej Fabry Date: Mon, 30 Jul 2018 13:29:40 +0200 Subject: [PATCH 067/174] Update deps and dont prune unused packages from cn-infra Signed-off-by: Ondrej Fabry --- Gopkg.lock | 173 +- Gopkg.toml | 4 + vendor/github.com/Shopify/sarama/.gitignore | 26 + vendor/github.com/Shopify/sarama/.travis.yml | 36 + vendor/github.com/Shopify/sarama/CHANGELOG.md | 541 ++++ vendor/github.com/Shopify/sarama/LICENSE | 20 + vendor/github.com/Shopify/sarama/Makefile | 30 + vendor/github.com/Shopify/sarama/README.md | 39 + vendor/github.com/Shopify/sarama/Vagrantfile | 20 + .../github.com/Shopify/sarama/acl_bindings.go | 119 + .../Shopify/sarama/acl_create_request.go | 76 + .../Shopify/sarama/acl_create_response.go | 88 + .../Shopify/sarama/acl_delete_request.go | 48 + .../Shopify/sarama/acl_delete_response.go | 155 ++ .../Shopify/sarama/acl_describe_request.go | 25 + .../Shopify/sarama/acl_describe_response.go | 80 + .../github.com/Shopify/sarama/acl_filter.go | 61 + vendor/github.com/Shopify/sarama/acl_types.go | 42 + .../sarama/add_offsets_to_txn_request.go | 52 + .../sarama/add_offsets_to_txn_response.go | 44 + .../sarama/add_partitions_to_txn_request.go | 76 + .../sarama/add_partitions_to_txn_response.go | 108 + .../Shopify/sarama/alter_configs_request.go | 120 + .../Shopify/sarama/alter_configs_response.go | 95 + .../Shopify/sarama/api_versions_request.go | 24 + .../Shopify/sarama/api_versions_response.go | 87 + .../Shopify/sarama/async_producer.go | 921 +++++++ vendor/github.com/Shopify/sarama/broker.go | 883 +++++++ vendor/github.com/Shopify/sarama/client.go | 846 ++++++ vendor/github.com/Shopify/sarama/config.go | 458 ++++ .../Shopify/sarama/config_resource_type.go | 15 + vendor/github.com/Shopify/sarama/consumer.go | 808 ++++++ .../Shopify/sarama/consumer_group_members.go | 94 + .../sarama/consumer_metadata_request.go | 33 + .../sarama/consumer_metadata_response.go | 77 + .../github.com/Shopify/sarama/crc32_field.go | 69 + .../sarama/create_partitions_request.go | 121 + .../sarama/create_partitions_response.go | 94 + .../Shopify/sarama/create_topics_request.go | 174 ++ .../Shopify/sarama/create_topics_response.go | 112 + .../Shopify/sarama/delete_groups_request.go | 30 + .../Shopify/sarama/delete_groups_response.go | 70 + .../Shopify/sarama/delete_records_request.go | 126 + .../Shopify/sarama/delete_records_response.go | 158 ++ .../Shopify/sarama/delete_topics_request.go | 48 + 
.../Shopify/sarama/delete_topics_response.go | 78 + .../sarama/describe_configs_request.go | 91 + .../sarama/describe_configs_response.go | 188 ++ .../Shopify/sarama/describe_groups_request.go | 30 + .../sarama/describe_groups_response.go | 187 ++ vendor/github.com/Shopify/sarama/dev.yml | 10 + .../Shopify/sarama/encoder_decoder.go | 89 + .../Shopify/sarama/end_txn_request.go | 50 + .../Shopify/sarama/end_txn_response.go | 44 + vendor/github.com/Shopify/sarama/errors.go | 281 ++ .../Shopify/sarama/fetch_request.go | 170 ++ .../Shopify/sarama/fetch_response.go | 385 +++ .../sarama/find_coordinator_request.go | 61 + .../sarama/find_coordinator_response.go | 92 + .../Shopify/sarama/heartbeat_request.go | 47 + .../Shopify/sarama/heartbeat_response.go | 32 + .../sarama/init_producer_id_request.go | 43 + .../sarama/init_producer_id_response.go | 55 + .../Shopify/sarama/join_group_request.go | 163 ++ .../Shopify/sarama/join_group_response.go | 135 + .../Shopify/sarama/leave_group_request.go | 40 + .../Shopify/sarama/leave_group_response.go | 32 + .../github.com/Shopify/sarama/length_field.go | 69 + .../Shopify/sarama/list_groups_request.go | 24 + .../Shopify/sarama/list_groups_response.go | 69 + vendor/github.com/Shopify/sarama/message.go | 223 ++ .../github.com/Shopify/sarama/message_set.go | 102 + .../Shopify/sarama/metadata_request.go | 88 + .../Shopify/sarama/metadata_response.go | 310 +++ vendor/github.com/Shopify/sarama/metrics.go | 51 + .../github.com/Shopify/sarama/mockbroker.go | 330 +++ .../Shopify/sarama/mockresponses.go | 540 ++++ .../github.com/Shopify/sarama/mocks/README.md | 13 + .../Shopify/sarama/mocks/async_producer.go | 174 ++ .../Shopify/sarama/mocks/consumer.go | 315 +++ .../github.com/Shopify/sarama/mocks/mocks.go | 48 + .../Shopify/sarama/mocks/sync_producer.go | 157 ++ .../Shopify/sarama/offset_commit_request.go | 204 ++ .../Shopify/sarama/offset_commit_response.go | 85 + .../Shopify/sarama/offset_fetch_request.go | 81 + .../Shopify/sarama/offset_fetch_response.go | 143 + .../Shopify/sarama/offset_manager.go | 560 ++++ .../Shopify/sarama/offset_request.go | 132 + .../Shopify/sarama/offset_response.go | 174 ++ .../Shopify/sarama/packet_decoder.go | 60 + .../Shopify/sarama/packet_encoder.go | 65 + .../github.com/Shopify/sarama/partitioner.go | 135 + .../github.com/Shopify/sarama/prep_encoder.go | 153 ++ .../Shopify/sarama/produce_request.go | 252 ++ .../Shopify/sarama/produce_response.go | 183 ++ .../github.com/Shopify/sarama/produce_set.go | 252 ++ .../github.com/Shopify/sarama/real_decoder.go | 324 +++ .../github.com/Shopify/sarama/real_encoder.go | 156 ++ vendor/github.com/Shopify/sarama/record.go | 113 + .../github.com/Shopify/sarama/record_batch.go | 268 ++ vendor/github.com/Shopify/sarama/records.go | 173 ++ vendor/github.com/Shopify/sarama/request.go | 149 ++ .../Shopify/sarama/response_header.go | 21 + vendor/github.com/Shopify/sarama/sarama.go | 99 + .../Shopify/sarama/sasl_handshake_request.go | 33 + .../Shopify/sarama/sasl_handshake_response.go | 38 + .../Shopify/sarama/sync_group_request.go | 100 + .../Shopify/sarama/sync_group_response.go | 41 + .../Shopify/sarama/sync_producer.go | 164 ++ vendor/github.com/Shopify/sarama/timestamp.go | 40 + .../sarama/txn_offset_commit_request.go | 126 + .../sarama/txn_offset_commit_response.go | 83 + vendor/github.com/Shopify/sarama/utils.go | 212 ++ .../github.com/bsm/sarama-cluster/.gitignore | 4 + .../github.com/bsm/sarama-cluster/.travis.yml | 19 + .../github.com/bsm/sarama-cluster/Gopkg.lock | 154 ++ 
.../github.com/bsm/sarama-cluster/Gopkg.toml | 26 + vendor/github.com/bsm/sarama-cluster/LICENSE | 22 + vendor/github.com/bsm/sarama-cluster/Makefile | 35 + .../github.com/bsm/sarama-cluster/README.md | 151 ++ .../bsm/sarama-cluster/README.md.tpl | 67 + .../github.com/bsm/sarama-cluster/balancer.go | 174 ++ .../github.com/bsm/sarama-cluster/client.go | 50 + .../github.com/bsm/sarama-cluster/cluster.go | 25 + .../github.com/bsm/sarama-cluster/config.go | 146 + .../github.com/bsm/sarama-cluster/consumer.go | 924 +++++++ vendor/github.com/bsm/sarama-cluster/doc.go | 8 + .../github.com/bsm/sarama-cluster/offsets.go | 69 + .../bsm/sarama-cluster/partitions.go | 287 ++ vendor/github.com/bsm/sarama-cluster/util.go | 75 + vendor/github.com/davecgh/go-spew/LICENSE | 15 + .../github.com/davecgh/go-spew/spew/bypass.go | 152 ++ .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 +++ .../github.com/davecgh/go-spew/spew/config.go | 306 +++ vendor/github.com/davecgh/go-spew/spew/doc.go | 211 ++ .../github.com/davecgh/go-spew/spew/dump.go | 509 ++++ .../github.com/davecgh/go-spew/spew/format.go | 419 +++ .../github.com/davecgh/go-spew/spew/spew.go | 148 ++ .../github.com/eapache/go-resiliency/LICENSE | 22 + .../eapache/go-resiliency/breaker/README.md | 34 + .../eapache/go-resiliency/breaker/breaker.go | 161 ++ .../eapache/go-xerial-snappy/.gitignore | 24 + .../eapache/go-xerial-snappy/.travis.yml | 7 + .../eapache/go-xerial-snappy/LICENSE | 21 + .../eapache/go-xerial-snappy/README.md | 13 + .../eapache/go-xerial-snappy/fuzz.go | 16 + .../eapache/go-xerial-snappy/snappy.go | 71 + vendor/github.com/eapache/queue/.gitignore | 23 + vendor/github.com/eapache/queue/.travis.yml | 7 + vendor/github.com/eapache/queue/LICENSE | 21 + vendor/github.com/eapache/queue/README.md | 16 + vendor/github.com/eapache/queue/queue.go | 102 + vendor/github.com/go-redis/redis/.gitignore | 2 + vendor/github.com/go-redis/redis/.travis.yml | 20 + vendor/github.com/go-redis/redis/CHANGELOG.md | 12 + vendor/github.com/go-redis/redis/LICENSE | 25 + vendor/github.com/go-redis/redis/Makefile | 20 + vendor/github.com/go-redis/redis/README.md | 146 + vendor/github.com/go-redis/redis/cluster.go | 1624 ++++++++++++ .../go-redis/redis/cluster_commands.go | 22 + vendor/github.com/go-redis/redis/command.go | 1225 +++++++++ vendor/github.com/go-redis/redis/commands.go | 2337 +++++++++++++++++ vendor/github.com/go-redis/redis/doc.go | 4 + .../internal/consistenthash/consistenthash.go | 81 + .../go-redis/redis/internal/error.go | 83 + .../redis/internal/hashtag/hashtag.go | 77 + .../go-redis/redis/internal/internal.go | 24 + .../github.com/go-redis/redis/internal/log.go | 15 + .../go-redis/redis/internal/once.go | 60 + .../go-redis/redis/internal/pool/conn.go | 80 + .../go-redis/redis/internal/pool/pool.go | 416 +++ .../redis/internal/pool/pool_single.go | 53 + .../redis/internal/pool/pool_sticky.go | 109 + .../go-redis/redis/internal/proto/reader.go | 316 +++ .../go-redis/redis/internal/proto/scan.go | 166 ++ .../redis/internal/proto/write_buffer.go | 113 + .../internal/singleflight/singleflight.go | 64 + .../go-redis/redis/internal/util.go | 29 + .../go-redis/redis/internal/util/safe.go | 7 + .../go-redis/redis/internal/util/strconv.go | 19 + .../go-redis/redis/internal/util/unsafe.go | 12 + vendor/github.com/go-redis/redis/iterator.go | 73 + vendor/github.com/go-redis/redis/options.go | 203 ++ vendor/github.com/go-redis/redis/parser.go | 394 +++ vendor/github.com/go-redis/redis/pipeline.go | 113 + 
vendor/github.com/go-redis/redis/pubsub.go | 460 ++++
vendor/github.com/go-redis/redis/redis.go | 501 ++++
vendor/github.com/go-redis/redis/result.go | 140 +
vendor/github.com/go-redis/redis/ring.go | 622 +++++
vendor/github.com/go-redis/redis/script.go | 62 +
vendor/github.com/go-redis/redis/sentinel.go | 343 +++
vendor/github.com/go-redis/redis/tx.go | 110 +
vendor/github.com/go-redis/redis/universal.go | 151 ++
vendor/github.com/gocql/gocql/.gitignore | 5 +
vendor/github.com/gocql/gocql/.travis.yml | 47 +
vendor/github.com/gocql/gocql/AUTHORS | 108 +
vendor/github.com/gocql/gocql/CONTRIBUTING.md | 78 +
vendor/github.com/gocql/gocql/LICENSE | 27 +
vendor/github.com/gocql/gocql/README.md | 215 ++
.../gocql/gocql/address_translators.go | 26 +
vendor/github.com/gocql/gocql/cluster.go | 202 ++
vendor/github.com/gocql/gocql/compressor.go | 28 +
vendor/github.com/gocql/gocql/conn.go | 1200 +++++++++
.../github.com/gocql/gocql/connectionpool.go | 579 ++++
vendor/github.com/gocql/gocql/control.go | 482 ++++
vendor/github.com/gocql/gocql/debug_off.go | 5 +
vendor/github.com/gocql/gocql/debug_on.go | 5 +
vendor/github.com/gocql/gocql/doc.go | 9 +
vendor/github.com/gocql/gocql/errors.go | 116 +
vendor/github.com/gocql/gocql/events.go | 293 +++
vendor/github.com/gocql/gocql/filters.go | 57 +
vendor/github.com/gocql/gocql/frame.go | 1957 ++++++++++++++
vendor/github.com/gocql/gocql/fuzz.go | 33 +
vendor/github.com/gocql/gocql/go.mod | 7 +
vendor/github.com/gocql/gocql/go.modverify | 3 +
vendor/github.com/gocql/gocql/helpers.go | 365 +++
vendor/github.com/gocql/gocql/host_source.go | 692 +++++
.../github.com/gocql/gocql/host_source_gen.go | 45 +
.../gocql/gocql/install_test_deps.sh | 16 +
vendor/github.com/gocql/gocql/integration.sh | 87 +
.../gocql/gocql/internal/lru/lru.go | 127 +
.../gocql/gocql/internal/murmur/murmur.go | 135 +
.../gocql/internal/murmur/murmur_appengine.go | 11 +
.../gocql/internal/murmur/murmur_unsafe.go | 15 +
.../gocql/gocql/internal/streams/streams.go | 140 +
vendor/github.com/gocql/gocql/logger.go | 30 +
vendor/github.com/gocql/gocql/marshal.go | 2216 ++++++++++++++++
vendor/github.com/gocql/gocql/metadata.go | 1100 ++++++++
vendor/github.com/gocql/gocql/policies.go | 854 ++++++
.../github.com/gocql/gocql/prepared_cache.go | 64 +
.../github.com/gocql/gocql/query_executor.go | 99 +
vendor/github.com/gocql/gocql/ring.go | 152 ++
vendor/github.com/gocql/gocql/session.go | 1787 +++++++++++++
vendor/github.com/gocql/gocql/token.go | 220 ++
vendor/github.com/gocql/gocql/topology.go | 212 ++
vendor/github.com/gocql/gocql/uuid.go | 272 ++
vendor/github.com/golang/snappy/.gitignore | 16 +
vendor/github.com/golang/snappy/AUTHORS | 15 +
vendor/github.com/golang/snappy/CONTRIBUTORS | 37 +
vendor/github.com/golang/snappy/LICENSE | 27 +
vendor/github.com/golang/snappy/README | 107 +
vendor/github.com/golang/snappy/decode.go | 237 ++
.../github.com/golang/snappy/decode_amd64.go | 14 +
.../github.com/golang/snappy/decode_amd64.s | 490 ++++
.../github.com/golang/snappy/decode_other.go | 101 +
vendor/github.com/golang/snappy/encode.go | 285 ++
.../github.com/golang/snappy/encode_amd64.go | 29 +
.../github.com/golang/snappy/encode_amd64.s | 730 +++++
.../github.com/golang/snappy/encode_other.go | 238 ++
vendor/github.com/golang/snappy/snappy.go | 98 +
.../hailocab/go-hostpool/.gitignore | 22 +
.../hailocab/go-hostpool/.travis.yml | 0
.../github.com/hailocab/go-hostpool/LICENSE | 21 +
.../github.com/hailocab/go-hostpool/README.md | 17 +
.../hailocab/go-hostpool/epsilon_greedy.go | 220 ++
.../go-hostpool/epsilon_value_calculators.go | 40 +
.../hailocab/go-hostpool/host_entry.go | 62 +
.../hailocab/go-hostpool/hostpool.go | 243 ++
vendor/github.com/hashicorp/consul/LICENSE | 354 +++
vendor/github.com/hashicorp/consul/NOTICE.md | 3 +
.../github.com/hashicorp/consul/api/README.md | 43 +
vendor/github.com/hashicorp/consul/api/acl.go | 193 ++
.../github.com/hashicorp/consul/api/agent.go | 791 ++++++
vendor/github.com/hashicorp/consul/api/api.go | 839 ++++++
.../hashicorp/consul/api/catalog.go | 213 ++
.../hashicorp/consul/api/connect.go | 12 +
.../hashicorp/consul/api/connect_ca.go | 165 ++
.../hashicorp/consul/api/connect_intention.go | 302 +++
.../hashicorp/consul/api/coordinate.go | 106 +
.../github.com/hashicorp/consul/api/event.go | 104 +
.../github.com/hashicorp/consul/api/health.go | 232 ++
vendor/github.com/hashicorp/consul/api/kv.go | 420 +++
.../github.com/hashicorp/consul/api/lock.go | 386 +++
.../hashicorp/consul/api/operator.go | 11 +
.../hashicorp/consul/api/operator_area.go | 193 ++
.../consul/api/operator_autopilot.go | 219 ++
.../hashicorp/consul/api/operator_keyring.go | 86 +
.../hashicorp/consul/api/operator_raft.go | 89 +
.../hashicorp/consul/api/operator_segment.go | 11 +
.../hashicorp/consul/api/prepared_query.go | 212 ++
vendor/github.com/hashicorp/consul/api/raw.go | 24 +
.../hashicorp/consul/api/semaphore.go | 514 ++++
.../hashicorp/consul/api/session.go | 224 ++
.../hashicorp/consul/api/snapshot.go | 47 +
.../github.com/hashicorp/consul/api/status.go | 43 +
.../ui-v2/app/styles/components/notice.scss | 24 +
.../hashicorp/consul/website/LICENSE.md | 10 +
.../source/api/operator/license.html.md | 143 +
.../docs/commands/license.html.markdown.erb | 109 +
.../hashicorp/go-rootcerts/.travis.yml | 12 +
.../github.com/hashicorp/go-rootcerts/LICENSE | 363 +++
.../hashicorp/go-rootcerts/Makefile | 8 +
.../hashicorp/go-rootcerts/README.md | 43 +
.../github.com/hashicorp/go-rootcerts/doc.go | 9 +
.../hashicorp/go-rootcerts/rootcerts.go | 103 +
.../hashicorp/go-rootcerts/rootcerts_base.go | 12 +
.../go-rootcerts/rootcerts_darwin.go | 48 +
.../capath-with-symlinks/securetrust.pem | 1 +
.../capath-with-symlinks/thawte.pem | 1 +
vendor/github.com/hashicorp/serf/LICENSE | 354 +++
.../hashicorp/serf/coordinate/client.go | 180 ++
.../hashicorp/serf/coordinate/config.go | 70 +
.../hashicorp/serf/coordinate/coordinate.go | 183 ++
.../hashicorp/serf/coordinate/phantom.go | 187 ++
.../hashicorp/serf/ops-misc/debian/copyright | 2 +
.../hashicorp/serf/website/source/LICENSE | 10 +
vendor/github.com/howeyc/crc16/.travis.yml | 1 +
vendor/github.com/howeyc/crc16/LICENSE | 27 +
vendor/github.com/howeyc/crc16/README.md | 34 +
vendor/github.com/howeyc/crc16/crc16.go | 161 ++
vendor/github.com/howeyc/crc16/hash.go | 116 +
vendor/github.com/kr/pretty/.gitignore | 4 +
vendor/github.com/kr/pretty/License | 21 +
vendor/github.com/kr/pretty/Readme | 9 +
vendor/github.com/kr/pretty/diff.go | 265 ++
vendor/github.com/kr/pretty/formatter.go | 328 +++
vendor/github.com/kr/pretty/go.mod | 3 +
vendor/github.com/kr/pretty/pretty.go | 108 +
vendor/github.com/kr/pretty/zero.go | 41 +
vendor/github.com/kr/text/License | 19 +
vendor/github.com/kr/text/Readme | 3 +
vendor/github.com/kr/text/doc.go | 3 +
vendor/github.com/kr/text/go.mod | 3 +
vendor/github.com/kr/text/indent.go | 74 +
vendor/github.com/kr/text/wrap.go | 86 +
vendor/github.com/ligato/cn-infra/.gitignore | 50 +
vendor/github.com/ligato/cn-infra/.travis.yml | 27 +
.../github.com/ligato/cn-infra/CHANGELOG.md | 266 ++
.../ligato/cn-infra/CONTRIBUTING.md | 15 +
vendor/github.com/ligato/cn-infra/Gopkg.lock | 607 +++++
vendor/github.com/ligato/cn-infra/Gopkg.toml | 129 +
vendor/github.com/ligato/cn-infra/Makefile | 138 +
vendor/github.com/ligato/cn-infra/README.md | 156 ++
.../ligato/cn-infra/datasync/grpcsync/doc.go | 22 +
.../datasync/grpcsync/grpc_watcher.go | 59 +
.../cn-infra/datasync/grpcsync/msgservice.go | 100 +
.../cn-infra/datasync/grpcsync/options.go | 32 +
.../datasync/grpcsync/plugin_impl_grpsync.go | 51 +
.../ligato/cn-infra/datasync/restsync/doc.go | 20 +
.../datasync/restsync/http_handlers.go | 71 +
.../datasync/restsync/rest_watcher.go | 63 +
.../datasync/syncbase/msg/change_event.go | 75 +
.../datasync/syncbase/msg/datamsg.pb.go | 766 ++++++
.../datasync/syncbase/msg/datamsg.proto | 120 +
.../cn-infra/datasync/syncbase/msg/doc.go | 18 +
.../github.com/ligato/cn-infra/db/README.md | 21 +
vendor/github.com/ligato/cn-infra/db/doc.go | 17 +
.../cn-infra/db/keyval/consul/README.md | 27 +
.../cn-infra/db/keyval/consul/consul.conf | 6 +
.../cn-infra/db/keyval/consul/consul.go | 549 ++++
.../cn-infra/db/keyval/consul/options.go | 62 +
.../cn-infra/db/keyval/consul/plugin.go | 157 ++
.../ligato/cn-infra/db/keyval/consul/txn.go | 69 +
.../cn-infra/db/keyval/etcd/mocks/doc.go | 17 +
.../db/keyval/etcd/mocks/embeded_etcd.go | 79 +
.../ligato/cn-infra/db/keyval/redis/README.md | 151 ++
.../db/keyval/redis/bytes_broker_impl.go | 531 ++++
.../db/keyval/redis/bytes_txn_impl.go | 139 +
.../db/keyval/redis/bytes_watcher_impl.go | 198 ++
.../ligato/cn-infra/db/keyval/redis/config.go | 371 +++
.../ligato/cn-infra/db/keyval/redis/doc.go | 18 +
.../cn-infra/db/keyval/redis/options.go | 41 +
.../db/keyval/redis/plugin_impl_redis.go | 130 +
.../ligato/cn-infra/db/sql/README.md | 22 +
.../cn-infra/db/sql/cassandra/README.md | 69 +
.../db/sql/cassandra/cassa_broker_impl.go | 129 +
.../db/sql/cassandra/cassa_txn_impl.go | 29 +
.../db/sql/cassandra/cassa_watcher_impl.go | 22 +
.../cn-infra/db/sql/cassandra/cassandra.conf | 26 +
.../cn-infra/db/sql/cassandra/config.go | 197 ++
.../ligato/cn-infra/db/sql/cassandra/doc.go | 73 +
.../cn-infra/db/sql/cassandra/options.go | 41 +
.../db/sql/cassandra/plugin_impl_cassa.go | 135 +
.../ligato/cn-infra/db/sql/cassandra/query.go | 309 +++
.../github.com/ligato/cn-infra/db/sql/doc.go | 19 +
.../ligato/cn-infra/db/sql/plugin_api_sql.go | 21 +
.../ligato/cn-infra/db/sql/slice_utils.go | 68 +
.../ligato/cn-infra/db/sql/sql_broker_api.go | 128 +
.../ligato/cn-infra/db/sql/sql_expression.go | 266 ++
.../cn-infra/db/sql/sql_struct_metadata.go | 56 +
.../ligato/cn-infra/db/sql/sql_watcher_api.go | 55 +
vendor/github.com/ligato/cn-infra/doc.go | 31 +
.../cn-infra/docker/dev_cn_infra/Dockerfile | 47 +
.../cn-infra/docker/dev_cn_infra/README.md | 69 +
.../docker/dev_cn_infra/build-agent.sh | 32 +
.../cn-infra/docker/dev_cn_infra/build.sh | 24 +
.../cn-infra/docker/dev_cn_infra/etcd.conf | 4 +
.../cn-infra/docker/dev_cn_infra/kafka.conf | 2 +
.../cn-infra/docker/dev_cn_infra/shrink.sh | 12 +
.../docker/dev_cn_infra/supervisord.conf | 10 +
.../cn-infra/docs/guidelines/CODINGSTYLE.md | 81 +
.../ligato/cn-infra/docs/guidelines/CONFIG.md | 119 +
.../cn-infra/docs/guidelines/DOCUMENTING.md | 15 +
.../cn-infra/docs/guidelines/EXAMPLES.md | 50 +
.../cn-infra/docs/guidelines/LOGGING.md | 175 ++
.../docs/guidelines/PLUGIN_DEPENDENCIES.md | 62 +
.../docs/guidelines/PLUGIN_FLAVORS.md | 167 ++
.../docs/guidelines/PLUGIN_LIFECYCLE.md | 121 +
.../ligato/cn-infra/docs/guidelines/README.md | 14 +
.../docs/guidelines/SYSTEM_INTEGRATION.md | 40 +
.../cn-infra/docs/guidelines/TESTING.md | 12 +
.../docs/imgs/codecomplete_2nd_edition.jpg | Bin 0 -> 99676 bytes
.../cn-infra/docs/imgs/datasync_pub.png | Bin 0 -> 41533 bytes
.../cn-infra/docs/imgs/datasync_watch.png | Bin 0 -> 61410 bytes
.../ligato/cn-infra/docs/imgs/db.png | Bin 0 -> 27985 bytes
.../ligato/cn-infra/docs/imgs/flavors.png | Bin 0 -> 46185 bytes
.../ligato/cn-infra/docs/imgs/grpc.png | Bin 0 -> 29489 bytes
.../docs/imgs/high_level_arch_cninfra.png | Bin 0 -> 149755 bytes
.../ligato/cn-infra/docs/imgs/http.png | Bin 0 -> 26221 bytes
.../cn-infra/docs/imgs/idxmap_cache.png | Bin 0 -> 14298 bytes
.../cn-infra/docs/imgs/idxmap_local.png | Bin 0 -> 12482 bytes
.../cn-infra/docs/imgs/plugin_lifecycle.png | Bin 0 -> 40844 bytes
.../cn-infra/docs/imgs/status_check.png | Bin 0 -> 47245 bytes
.../cn-infra/docs/imgs/status_check_pull.png | Bin 0 -> 32703 bytes
.../cn-infra/docs/imgs/status_check_push.png | Bin 0 -> 32175 bytes
.../docs/readmes/cn_virtual_function.md | 29 +
.../ligato/cn-infra/examples/README.md | 47 +
.../cn-infra/examples/cassandra-lib/Readme.md | 23 +
.../examples/cassandra-lib/client-config.yaml | 12 +
.../cn-infra/examples/cassandra-lib/doc.go | 3 +
.../cn-infra/examples/cassandra-lib/main.go | 287 ++
.../examples/configs-plugin/Readme.md | 10 +
.../cn-infra/examples/configs-plugin/doc.go | 2 +
.../examples/configs-plugin/example.conf | 2 +
.../cn-infra/examples/configs-plugin/main.go | 124 +
.../cn-infra/examples/consul-lib/main.go | 146 +
.../examples/datasync-plugin/Readme.md | 32 +
.../cn-infra/examples/datasync-plugin/doc.go | 4 +
.../examples/datasync-plugin/etcd.conf | 4 +
.../cn-infra/examples/datasync-plugin/main.go | 307 +++
.../ligato/cn-infra/examples/doc.go | 17 +
.../cn-infra/examples/etcd-lib/Makefile | 22 +
.../cn-infra/examples/etcd-lib/Readme.md | 71 +
.../ligato/cn-infra/examples/etcd-lib/doc.go | 3 +
.../cn-infra/examples/etcd-lib/editor/doc.go | 3 +
.../examples/etcd-lib/editor/editor.go | 142 +
.../cn-infra/examples/etcd-lib/etcd.conf | 4 +
.../etcd-lib/model/phonebook/config.go | 13 +
.../etcd-lib/model/phonebook/phonebook.pb.go | 29 +
.../etcd-lib/model/phonebook/phonebook.proto | 9 +
.../cn-infra/examples/etcd-lib/view/doc.go | 2 +
.../cn-infra/examples/etcd-lib/view/view.go | 89 +
.../cn-infra/examples/etcd-lib/watcher/doc.go | 2 +
.../examples/etcd-lib/watcher/watcher.go | 118 +
.../cn-infra/examples/flags-lib/Readme.md | 19 +
.../ligato/cn-infra/examples/flags-lib/doc.go | 3 +
.../cn-infra/examples/flags-lib/main.go | 66 +
.../cn-infra/examples/grpc-plugin/README.md | 12 +
.../cn-infra/examples/grpc-plugin/doc.go | 2 +
.../examples/grpc-plugin/grpc-client/main.go | 38 +
.../examples/grpc-plugin/grpc-server/doc.go | 2 +
.../grpc-plugin/grpc-server/grpc.conf | 1 +
.../examples/grpc-plugin/grpc-server/main.go | 161 ++
.../cn-infra/examples/kafka-lib/Makefile | 12 +
.../kafka-lib/asyncproducer/README.md | 91 +
.../kafka-lib/asyncproducer/asyncproducer.go | 175 ++
.../examples/kafka-lib/asyncproducer/doc.go | 3 +
.../examples/kafka-lib/consumer/README.md | 55 +
.../examples/kafka-lib/consumer/consumer.go | 195 ++
.../examples/kafka-lib/consumer/doc.go | 3 +
.../ligato/cn-infra/examples/kafka-lib/doc.go | 2 +
.../cn-infra/examples/kafka-lib/mux/README.md | 30 +
.../cn-infra/examples/kafka-lib/mux/config | 6 +
.../cn-infra/examples/kafka-lib/mux/doc.go | 3 +
.../cn-infra/examples/kafka-lib/mux/main.go | 93 +
.../examples/kafka-lib/syncproducer/README.md | 88 +
.../examples/kafka-lib/syncproducer/doc.go | 3 +
.../kafka-lib/syncproducer/syncproducer.go | 152 ++
.../cn-infra/examples/kafka-lib/utils/doc.go | 2 +
.../examples/kafka-lib/utils/prompter.go | 86 +
.../cn-infra/examples/kafka-plugin/doc.go | 2 +
.../kafka-plugin/hash-partitioner/README.md | 30 +
.../kafka-plugin/hash-partitioner/doc.go | 3 +
.../kafka-plugin/hash-partitioner/kafka.conf | 3 +
.../kafka-plugin/hash-partitioner/main.go | 311 +++
.../kafka-plugin/manual-partitioner/README.md | 39 +
.../kafka-plugin/manual-partitioner/doc.go | 3 +
.../manual-partitioner/kafka.conf | 2 +
.../kafka-plugin/manual-partitioner/main.go | 347 +++
.../manual-partitioner/server.properties | 17 +
.../kafka-plugin/post-init-consumer/doc.go | 3 +
.../post-init-consumer/kafka.conf | 2 +
.../kafka-plugin/post-init-consumer/main.go | 206 ++
.../cn-infra/examples/logs-lib/Makefile | 10 +
.../cn-infra/examples/logs-lib/basic/REDME.md | 7 +
.../cn-infra/examples/logs-lib/basic/basic.go | 64 +
.../cn-infra/examples/logs-lib/basic/doc.go | 2 +
.../examples/logs-lib/custom/README.md | 7 +
.../examples/logs-lib/custom/custom.go | 51 +
.../cn-infra/examples/logs-lib/custom/doc.go | 2 +
.../ligato/cn-infra/examples/logs-lib/doc.go | 3 +
.../cn-infra/examples/logs-lib/http/README.md | 22 +
.../cn-infra/examples/logs-lib/http/doc.go | 2 +
.../cn-infra/examples/logs-lib/http/server.go | 80 +
.../cn-infra/examples/logs-plugin/README.md | 7 +
.../cn-infra/examples/logs-plugin/doc.go | 2 +
.../cn-infra/examples/logs-plugin/logs.conf | 8 +
.../cn-infra/examples/logs-plugin/main.go | 126 +
.../ligato/cn-infra/examples/model/README.md | 25 +
.../ligato/cn-infra/examples/model/doc.go | 3 +
.../cn-infra/examples/model/example.pb.go | 38 +
.../cn-infra/examples/model/example.proto | 14 +
.../examples/prometheus-plugin/README.md | 63 +
.../examples/prometheus-plugin/main.go | 189 ++
.../cn-infra/examples/redis-lib/Makefile | 27 +
.../cn-infra/examples/redis-lib/Readme.md | 58 +
.../examples/redis-lib/airport/airport.go | 653 +++++
.../examples/redis-lib/airport/doc.go | 2 +
.../examples/redis-lib/airport/model/doc.go | 2 +
.../redis-lib/airport/model/flight.pb.go | 111 +
.../redis-lib/airport/model/flight.proto | 15 +
.../examples/redis-lib/cluster-client.yaml | 17 +
.../cn-infra/examples/redis-lib/diagram.png | Bin 0 -> 36745 bytes
.../ligato/cn-infra/examples/redis-lib/doc.go | 3 +
.../examples/redis-lib/node-client.yaml | 19 +
.../examples/redis-lib/sentinel-client.yaml | 16 +
.../cn-infra/examples/redis-lib/simple/doc.go | 2 +
.../examples/redis-lib/simple/simple.go | 412 +++
.../cn-infra/examples/redis-plugin/README.md | 10 +
.../cn-infra/examples/redis-plugin/doc.go | 2 +
.../cn-infra/examples/redis-plugin/main.go | 77 +
.../cn-infra/examples/simple-agent/README.md | 20 +
.../cn-infra/examples/simple-agent/agent.go | 36 +
.../cn-infra/examples/simple-agent/doc.go | 3 +
.../examples/statuscheck-plugin/README.md | 54 +
.../examples/statuscheck-plugin/doc.go | 17 +
.../examples/statuscheck-plugin/etcd.conf | 4 +
.../examples/statuscheck-plugin/main.go | 108 +
.../github.com/ligato/cn-infra/health/doc.go | 3 +
.../ligato/cn-infra/health/probe/doc.go | 16 +
.../ligato/cn-infra/health/probe/options.go | 59 +
.../health/probe/plugin_impl_probe.go | 115 +
.../cn-infra/health/probe/prometheus_probe.go | 130 +
.../health/statuscheck/pluginstatusmap/doc.go | 3 +
.../pluginstatusmap/plugin_status_map.go | 140 +
.../cn-infra/logging/logmanager/README.md | 25 +
.../ligato/cn-infra/logging/logmanager/doc.go | 17 +
.../cn-infra/logging/logmanager/options.go | 49 +
.../logmanager/plugin_impl_log_manager.go | 218 ++
.../ligato/cn-infra/messaging/kafka/README.md | 34 +
.../messaging/kafka/client/asyncproducer.go | 267 ++
.../cn-infra/messaging/kafka/client/config.go | 308 +++
.../messaging/kafka/client/consumer.go | 360 +++
.../cn-infra/messaging/kafka/client/doc.go | 17 +
.../messaging/kafka/client/messages.go | 276 ++
.../cn-infra/messaging/kafka/client/mocks.go | 181 ++
.../messaging/kafka/client/syncproducer.go | 217 ++
.../ligato/cn-infra/messaging/kafka/doc.go | 221 ++
.../cn-infra/messaging/kafka/kafka.conf | 9 +
.../cn-infra/messaging/kafka/mux/README.md | 29 +
.../messaging/kafka/mux/bytes_connection.go | 323 +++
.../cn-infra/messaging/kafka/mux/chan.go | 66 +
.../cn-infra/messaging/kafka/mux/config.go | 153 ++
.../cn-infra/messaging/kafka/mux/doc.go | 17 +
.../cn-infra/messaging/kafka/mux/mock.go | 49 +
.../messaging/kafka/mux/multiplexer.go | 372 +++
.../messaging/kafka/mux/proto_connection.go | 364 +++
.../cn-infra/messaging/kafka/options.go | 57 +
.../messaging/kafka/plugin_impl_kafka.go | 239 ++
vendor/github.com/ligato/cn-infra/rpc/doc.go | 2 +
.../ligato/cn-infra/rpc/rest/mock/doc.go | 2 +
.../ligato/cn-infra/rpc/rest/mock/httpmock.go | 61 +
.../ligato/cn-infra/scripts/check_links.sh | 18 +
.../ligato/cn-infra/scripts/gofmt.sh | 3 +
.../cn-infra/scripts/static_analysis.sh | 15 +
.../docker_start_stop_functions.sh | 79 +
.../scripts/test_examples/plugin_reconnect.sh | 232 ++
.../scripts/test_examples/test_examples.sh | 486 ++++
.../ligato/cn-infra/utils/README.md | 3 +
.../ligato/cn-infra/utils/clienttls/doc.go | 16 +
.../cn-infra/utils/clienttls/tlsutil.go | 65 +
.../github.com/ligato/cn-infra/utils/doc.go | 2 +
.../ligato/cn-infra/utils/runtimeutils/doc.go | 2 +
.../utils/runtimeutils/runtimeutils.go | 56 +
.../ligato/cn-infra/utils/structs/doc.go | 16 +
.../utils/structs/structs_reflection.go | 136 +
vendor/github.com/maraino/go-mock/.travis.yml | 9 +
vendor/github.com/maraino/go-mock/AUTHORS | 4 +
vendor/github.com/maraino/go-mock/LICENSE | 22 +
vendor/github.com/maraino/go-mock/Makefile | 11 +
vendor/github.com/maraino/go-mock/README.md | 201 ++
vendor/github.com/maraino/go-mock/mock.go | 682 +++++
.../github.com/mitchellh/go-homedir/LICENSE | 21 +
.../github.com/mitchellh/go-homedir/README.md | 14 +
.../mitchellh/go-homedir/homedir.go | 155 ++
.../mitchellh/mapstructure/.travis.yml | 8 +
.../github.com/mitchellh/mapstructure/LICENSE | 21 +
.../mitchellh/mapstructure/README.md | 46 +
.../mitchellh/mapstructure/decode_hooks.go | 171 ++
.../mitchellh/mapstructure/error.go | 50 +
.../mitchellh/mapstructure/mapstructure.go | 1064 ++++++++
vendor/github.com/pierrec/lz4/.gitignore | 33 +
vendor/github.com/pierrec/lz4/.travis.yml | 18 +
vendor/github.com/pierrec/lz4/LICENSE | 28 +
vendor/github.com/pierrec/lz4/README.md | 22 +
vendor/github.com/pierrec/lz4/block.go | 397 +++
vendor/github.com/pierrec/lz4/debug.go | 21 +
vendor/github.com/pierrec/lz4/debug_stub.go | 5 +
.../pierrec/lz4/internal/xxh32/xxh32zero.go | 222 ++
vendor/github.com/pierrec/lz4/lz4.go | 68 +
vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 +
.../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 +
vendor/github.com/pierrec/lz4/reader.go | 273 ++
vendor/github.com/pierrec/lz4/writer.go | 235 ++
.../github.com/rcrowley/go-metrics/.gitignore | 9 +
.../rcrowley/go-metrics/.travis.yml | 17 +
vendor/github.com/rcrowley/go-metrics/LICENSE | 29 +
.../github.com/rcrowley/go-metrics/README.md | 168 ++
.../github.com/rcrowley/go-metrics/counter.go | 112 +
.../github.com/rcrowley/go-metrics/debug.go | 76 +
vendor/github.com/rcrowley/go-metrics/ewma.go | 138 +
.../github.com/rcrowley/go-metrics/gauge.go | 120 +
.../rcrowley/go-metrics/gauge_float64.go | 125 +
.../rcrowley/go-metrics/graphite.go | 113 +
.../rcrowley/go-metrics/healthcheck.go | 61 +
.../rcrowley/go-metrics/histogram.go | 202 ++
vendor/github.com/rcrowley/go-metrics/json.go | 31 +
vendor/github.com/rcrowley/go-metrics/log.go | 80 +
.../github.com/rcrowley/go-metrics/memory.md | 285 ++
.../github.com/rcrowley/go-metrics/meter.go | 257 ++
.../github.com/rcrowley/go-metrics/metrics.go | 13 +
.../rcrowley/go-metrics/opentsdb.go | 119 +
.../rcrowley/go-metrics/registry.go | 363 +++
.../github.com/rcrowley/go-metrics/runtime.go | 212 ++
.../rcrowley/go-metrics/runtime_cgo.go | 10 +
.../go-metrics/runtime_gccpufraction.go | 9 +
.../rcrowley/go-metrics/runtime_no_cgo.go | 7 +
.../go-metrics/runtime_no_gccpufraction.go | 9 +
.../github.com/rcrowley/go-metrics/sample.go | 616 +++++
.../github.com/rcrowley/go-metrics/syslog.go | 78 +
.../github.com/rcrowley/go-metrics/timer.go | 329 +++
.../rcrowley/go-metrics/validate.sh | 10 +
.../github.com/rcrowley/go-metrics/writer.go | 100 +
.../github.com/willfaught/gockle/.travis.yml | 12 +
vendor/github.com/willfaught/gockle/batch.go | 108 +
vendor/github.com/willfaught/gockle/doc.go | 15 +
.../github.com/willfaught/gockle/iterator.go | 61 +
.../github.com/willfaught/gockle/license.md | 21 +
vendor/github.com/willfaught/gockle/readme.md | 8 +
.../github.com/willfaught/gockle/session.go | 228 ++
vendor/gopkg.in/inf.v0/LICENSE | 28 +
vendor/gopkg.in/inf.v0/dec.go | 615 +++++
vendor/gopkg.in/inf.v0/rounder.go | 145 +
644 files changed, 86855 insertions(+), 2 deletions(-)
create mode 100644 vendor/github.com/Shopify/sarama/.gitignore
create mode 100644 vendor/github.com/Shopify/sarama/.travis.yml
create mode 100644 vendor/github.com/Shopify/sarama/CHANGELOG.md
create mode 100644 vendor/github.com/Shopify/sarama/LICENSE
create mode 100644 vendor/github.com/Shopify/sarama/Makefile
create mode 100644 vendor/github.com/Shopify/sarama/README.md
create mode 100644 vendor/github.com/Shopify/sarama/Vagrantfile
create mode 100644 vendor/github.com/Shopify/sarama/acl_bindings.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_create_request.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_create_response.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_request.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_response.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_describe_request.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_describe_response.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_filter.go
create mode 100644 vendor/github.com/Shopify/sarama/acl_types.go
create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_request.go
create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_response.go
create mode 100644 vendor/github.com/Shopify/sarama/api_versions_request.go
create mode 100644 vendor/github.com/Shopify/sarama/api_versions_response.go
create mode 100644 vendor/github.com/Shopify/sarama/async_producer.go
create mode 100644 vendor/github.com/Shopify/sarama/broker.go
create mode 100644 vendor/github.com/Shopify/sarama/client.go
create mode 100644 vendor/github.com/Shopify/sarama/config.go
create mode 100644 vendor/github.com/Shopify/sarama/config_resource_type.go
create mode 100644 vendor/github.com/Shopify/sarama/consumer.go
create mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members.go
create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request.go
create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response.go
create mode 100644 vendor/github.com/Shopify/sarama/crc32_field.go
create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_request.go
create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_response.go
create mode 100644 vendor/github.com/Shopify/sarama/create_topics_request.go
create mode 100644 vendor/github.com/Shopify/sarama/create_topics_response.go
create mode 100644 vendor/github.com/Shopify/sarama/delete_groups_request.go
create mode 100644 vendor/github.com/Shopify/sarama/delete_groups_response.go
create mode 100644 vendor/github.com/Shopify/sarama/delete_records_request.go
create mode 100644 vendor/github.com/Shopify/sarama/delete_records_response.go
create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_request.go
create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_response.go
create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_request.go
create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_response.go
create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request.go
create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response.go
create mode 100644 vendor/github.com/Shopify/sarama/dev.yml
create mode 100644 vendor/github.com/Shopify/sarama/encoder_decoder.go
create mode 100644 vendor/github.com/Shopify/sarama/end_txn_request.go
create mode 100644 vendor/github.com/Shopify/sarama/end_txn_response.go
create mode 100644 vendor/github.com/Shopify/sarama/errors.go
create mode 100644 vendor/github.com/Shopify/sarama/fetch_request.go
create mode 100644 vendor/github.com/Shopify/sarama/fetch_response.go
create mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_request.go
create mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_response.go
create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request.go
create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response.go
create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_request.go
create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_response.go
create mode 100644 vendor/github.com/Shopify/sarama/join_group_request.go
create mode 100644 vendor/github.com/Shopify/sarama/join_group_response.go
create mode 100644 vendor/github.com/Shopify/sarama/leave_group_request.go
create mode 100644 vendor/github.com/Shopify/sarama/leave_group_response.go
create mode 100644 vendor/github.com/Shopify/sarama/length_field.go
create mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go
create mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go
create mode 100644 vendor/github.com/Shopify/sarama/message.go
create mode 100644 vendor/github.com/Shopify/sarama/message_set.go
create mode 100644 vendor/github.com/Shopify/sarama/metadata_request.go
create mode 100644 vendor/github.com/Shopify/sarama/metadata_response.go
create mode 100644 vendor/github.com/Shopify/sarama/metrics.go
create mode 100644 vendor/github.com/Shopify/sarama/mockbroker.go
create mode 100644 vendor/github.com/Shopify/sarama/mockresponses.go
create mode 100644 vendor/github.com/Shopify/sarama/mocks/README.md
create mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer.go
create mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer.go
create mode 100644 vendor/github.com/Shopify/sarama/mocks/mocks.go
create mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer.go
create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request.go
create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response.go
create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request.go
create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response.go
create mode 100644 vendor/github.com/Shopify/sarama/offset_manager.go
create mode 100644 vendor/github.com/Shopify/sarama/offset_request.go
create mode 100644 vendor/github.com/Shopify/sarama/offset_response.go
create mode 100644 vendor/github.com/Shopify/sarama/packet_decoder.go
create mode 100644 vendor/github.com/Shopify/sarama/packet_encoder.go
create mode 100644 vendor/github.com/Shopify/sarama/partitioner.go
create mode 100644 vendor/github.com/Shopify/sarama/prep_encoder.go
create mode 100644 vendor/github.com/Shopify/sarama/produce_request.go
create mode 100644 vendor/github.com/Shopify/sarama/produce_response.go
create mode 100644 vendor/github.com/Shopify/sarama/produce_set.go
create mode 100644 vendor/github.com/Shopify/sarama/real_decoder.go
create mode 100644 vendor/github.com/Shopify/sarama/real_encoder.go
create mode 100644 vendor/github.com/Shopify/sarama/record.go
create mode 100644 vendor/github.com/Shopify/sarama/record_batch.go
create mode 100644 vendor/github.com/Shopify/sarama/records.go
create mode 100644 vendor/github.com/Shopify/sarama/request.go
create mode 100644 vendor/github.com/Shopify/sarama/response_header.go
create mode 100644 vendor/github.com/Shopify/sarama/sarama.go
create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request.go
create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response.go
create mode 100644 vendor/github.com/Shopify/sarama/sync_group_request.go
create mode 100644 vendor/github.com/Shopify/sarama/sync_group_response.go
create mode 100644 vendor/github.com/Shopify/sarama/sync_producer.go
create mode 100644 vendor/github.com/Shopify/sarama/timestamp.go
create mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
create mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
create mode 100644 vendor/github.com/Shopify/sarama/utils.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/.gitignore
create mode 100644 vendor/github.com/bsm/sarama-cluster/.travis.yml
create mode 100644 vendor/github.com/bsm/sarama-cluster/Gopkg.lock
create mode 100644 vendor/github.com/bsm/sarama-cluster/Gopkg.toml
create mode 100644 vendor/github.com/bsm/sarama-cluster/LICENSE
create mode 100644 vendor/github.com/bsm/sarama-cluster/Makefile
create mode 100644 vendor/github.com/bsm/sarama-cluster/README.md
create mode 100644 vendor/github.com/bsm/sarama-cluster/README.md.tpl
create mode 100644 vendor/github.com/bsm/sarama-cluster/balancer.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/client.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/cluster.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/config.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/consumer.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/doc.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/offsets.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/partitions.go
create mode 100644 vendor/github.com/bsm/sarama-cluster/util.go
create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
create mode 100644 vendor/github.com/eapache/go-resiliency/LICENSE
create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/README.md
create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go
create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.gitignore
create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.travis.yml
create mode 100644 vendor/github.com/eapache/go-xerial-snappy/LICENSE
create mode 100644 vendor/github.com/eapache/go-xerial-snappy/README.md
create mode 100644 vendor/github.com/eapache/go-xerial-snappy/fuzz.go
create mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go
create mode 100644 vendor/github.com/eapache/queue/.gitignore
create mode 100644 vendor/github.com/eapache/queue/.travis.yml
create mode 100644 vendor/github.com/eapache/queue/LICENSE
create mode 100644 vendor/github.com/eapache/queue/README.md
create mode 100644 vendor/github.com/eapache/queue/queue.go
create mode 100644 vendor/github.com/go-redis/redis/.gitignore
create mode 100644 vendor/github.com/go-redis/redis/.travis.yml
create mode 100644 vendor/github.com/go-redis/redis/CHANGELOG.md
create mode 100644 vendor/github.com/go-redis/redis/LICENSE
create mode 100644 vendor/github.com/go-redis/redis/Makefile
create mode 100644 vendor/github.com/go-redis/redis/README.md
create mode 100644 vendor/github.com/go-redis/redis/cluster.go
create mode 100644 vendor/github.com/go-redis/redis/cluster_commands.go
create mode 100644 vendor/github.com/go-redis/redis/command.go
create mode 100644 vendor/github.com/go-redis/redis/commands.go
create mode 100644 vendor/github.com/go-redis/redis/doc.go
create mode 100644 vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go
create mode 100644 vendor/github.com/go-redis/redis/internal/error.go
create mode 100644 vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
create mode 100644 vendor/github.com/go-redis/redis/internal/internal.go
create mode 100644 vendor/github.com/go-redis/redis/internal/log.go
create mode 100644 vendor/github.com/go-redis/redis/internal/once.go
create mode 100644 vendor/github.com/go-redis/redis/internal/pool/conn.go
create mode 100644 vendor/github.com/go-redis/redis/internal/pool/pool.go
create mode 100644 vendor/github.com/go-redis/redis/internal/pool/pool_single.go
create mode 100644 vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go
create mode 100644 vendor/github.com/go-redis/redis/internal/proto/reader.go
create mode 100644 vendor/github.com/go-redis/redis/internal/proto/scan.go
create mode 100644 vendor/github.com/go-redis/redis/internal/proto/write_buffer.go
create mode 100644 vendor/github.com/go-redis/redis/internal/singleflight/singleflight.go
create mode 100644 vendor/github.com/go-redis/redis/internal/util.go
create mode 100644 vendor/github.com/go-redis/redis/internal/util/safe.go
create mode 100644 vendor/github.com/go-redis/redis/internal/util/strconv.go
create mode 100644 vendor/github.com/go-redis/redis/internal/util/unsafe.go
create mode 100644 vendor/github.com/go-redis/redis/iterator.go
create mode 100644 vendor/github.com/go-redis/redis/options.go
create mode 100644 vendor/github.com/go-redis/redis/parser.go
create mode 100644 vendor/github.com/go-redis/redis/pipeline.go
create mode 100644 vendor/github.com/go-redis/redis/pubsub.go
create mode 100644 vendor/github.com/go-redis/redis/redis.go
create mode 100644 vendor/github.com/go-redis/redis/result.go
create mode 100644 vendor/github.com/go-redis/redis/ring.go
create mode 100644 vendor/github.com/go-redis/redis/script.go
create mode 100644 vendor/github.com/go-redis/redis/sentinel.go
create mode 100644 vendor/github.com/go-redis/redis/tx.go
create mode 100644 vendor/github.com/go-redis/redis/universal.go
create mode 100644 vendor/github.com/gocql/gocql/.gitignore
create mode 100644 vendor/github.com/gocql/gocql/.travis.yml
create mode 100644 vendor/github.com/gocql/gocql/AUTHORS
create mode 100644 vendor/github.com/gocql/gocql/CONTRIBUTING.md
create mode 100644 vendor/github.com/gocql/gocql/LICENSE
create mode 100644 vendor/github.com/gocql/gocql/README.md
create mode 100644 vendor/github.com/gocql/gocql/address_translators.go
create mode 100644 vendor/github.com/gocql/gocql/cluster.go
create mode 100644 vendor/github.com/gocql/gocql/compressor.go
create mode 100644 vendor/github.com/gocql/gocql/conn.go
create mode 100644 vendor/github.com/gocql/gocql/connectionpool.go
create mode 100644 vendor/github.com/gocql/gocql/control.go
create mode 100644 vendor/github.com/gocql/gocql/debug_off.go
create mode 100644 vendor/github.com/gocql/gocql/debug_on.go
create mode 100644 vendor/github.com/gocql/gocql/doc.go
create mode 100644 vendor/github.com/gocql/gocql/errors.go
create mode 100644 vendor/github.com/gocql/gocql/events.go
create mode 100644 vendor/github.com/gocql/gocql/filters.go
create mode 100644 vendor/github.com/gocql/gocql/frame.go
create mode 100644 vendor/github.com/gocql/gocql/fuzz.go
create mode 100644 vendor/github.com/gocql/gocql/go.mod
create mode 100644 vendor/github.com/gocql/gocql/go.modverify
create mode 100644 vendor/github.com/gocql/gocql/helpers.go
create mode 100644 vendor/github.com/gocql/gocql/host_source.go
create mode 100644 vendor/github.com/gocql/gocql/host_source_gen.go
create mode 100755 vendor/github.com/gocql/gocql/install_test_deps.sh
create mode 100755 vendor/github.com/gocql/gocql/integration.sh
create mode 100644 vendor/github.com/gocql/gocql/internal/lru/lru.go
create mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur.go
create mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go
create mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go
create mode 100644 vendor/github.com/gocql/gocql/internal/streams/streams.go
create mode 100644 vendor/github.com/gocql/gocql/logger.go
create mode 100644 vendor/github.com/gocql/gocql/marshal.go
create mode 100644 vendor/github.com/gocql/gocql/metadata.go
create mode 100644 vendor/github.com/gocql/gocql/policies.go
create mode 100644 vendor/github.com/gocql/gocql/prepared_cache.go
create mode 100644 vendor/github.com/gocql/gocql/query_executor.go
create mode 100644 vendor/github.com/gocql/gocql/ring.go
create mode 100644 vendor/github.com/gocql/gocql/session.go
create mode 100644 vendor/github.com/gocql/gocql/token.go
create mode 100644 vendor/github.com/gocql/gocql/topology.go
create mode 100644 vendor/github.com/gocql/gocql/uuid.go
create mode 100644 vendor/github.com/golang/snappy/.gitignore
create mode 100644 vendor/github.com/golang/snappy/AUTHORS
create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS
create mode 100644 vendor/github.com/golang/snappy/LICENSE
create mode 100644 vendor/github.com/golang/snappy/README
create mode 100644 vendor/github.com/golang/snappy/decode.go
create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go
create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s
create mode 100644 vendor/github.com/golang/snappy/decode_other.go
create mode 100644 vendor/github.com/golang/snappy/encode.go
create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go
create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s
create mode 100644 vendor/github.com/golang/snappy/encode_other.go
create mode 100644 vendor/github.com/golang/snappy/snappy.go
create mode 100644 vendor/github.com/hailocab/go-hostpool/.gitignore
create mode 100644 vendor/github.com/hailocab/go-hostpool/.travis.yml
create mode 100644 vendor/github.com/hailocab/go-hostpool/LICENSE
create mode 100644 vendor/github.com/hailocab/go-hostpool/README.md
create mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go
create mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go
create mode 100644 vendor/github.com/hailocab/go-hostpool/host_entry.go
create mode 100644 vendor/github.com/hailocab/go-hostpool/hostpool.go
create mode 100644 vendor/github.com/hashicorp/consul/LICENSE
create mode 100644 vendor/github.com/hashicorp/consul/NOTICE.md
create mode 100644 vendor/github.com/hashicorp/consul/api/README.md
create mode 100644 vendor/github.com/hashicorp/consul/api/acl.go
create mode 100644 vendor/github.com/hashicorp/consul/api/agent.go
create mode 100644 vendor/github.com/hashicorp/consul/api/api.go
create mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go
create mode 100644 vendor/github.com/hashicorp/consul/api/connect.go
create mode 100644 vendor/github.com/hashicorp/consul/api/connect_ca.go
create mode 100644 vendor/github.com/hashicorp/consul/api/connect_intention.go
create mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go
create mode 100644 vendor/github.com/hashicorp/consul/api/event.go
create mode 100644 vendor/github.com/hashicorp/consul/api/health.go
create mode 100644 vendor/github.com/hashicorp/consul/api/kv.go
create mode 100644 vendor/github.com/hashicorp/consul/api/lock.go
create mode 100644 vendor/github.com/hashicorp/consul/api/operator.go
create mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go
create mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go
create mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go
create mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go
create mode 100644 vendor/github.com/hashicorp/consul/api/operator_segment.go
create mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go
create mode 100644 vendor/github.com/hashicorp/consul/api/raw.go
create mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go
create mode 100644 vendor/github.com/hashicorp/consul/api/session.go
create mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go
create mode 100644 vendor/github.com/hashicorp/consul/api/status.go
create mode 100644 vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss
create mode 100644 vendor/github.com/hashicorp/consul/website/LICENSE.md
create mode 100644 vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md
create mode 100644 vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
create mode 120000 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
create mode 120000 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
create mode 100644 vendor/github.com/hashicorp/serf/LICENSE
create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go
create mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go
create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go
create mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go
create mode 100644 vendor/github.com/hashicorp/serf/ops-misc/debian/copyright
create mode 100644 vendor/github.com/hashicorp/serf/website/source/LICENSE
create mode 100644 vendor/github.com/howeyc/crc16/.travis.yml
create mode 100644 vendor/github.com/howeyc/crc16/LICENSE
create mode 100644 vendor/github.com/howeyc/crc16/README.md
create mode 100644 vendor/github.com/howeyc/crc16/crc16.go
create mode 100644 vendor/github.com/howeyc/crc16/hash.go
create mode 100644 vendor/github.com/kr/pretty/.gitignore
create mode 100644 vendor/github.com/kr/pretty/License
create mode 100644 vendor/github.com/kr/pretty/Readme
create mode 100644 vendor/github.com/kr/pretty/diff.go
create mode 100644 vendor/github.com/kr/pretty/formatter.go
create mode 100644 vendor/github.com/kr/pretty/go.mod
create mode 100644 vendor/github.com/kr/pretty/pretty.go
create mode 100644 vendor/github.com/kr/pretty/zero.go
create mode 100644 vendor/github.com/kr/text/License
create mode 100644 vendor/github.com/kr/text/Readme
create mode 100644 vendor/github.com/kr/text/doc.go
create mode 100644 vendor/github.com/kr/text/go.mod
create mode 100644 vendor/github.com/kr/text/indent.go
create mode 100644 vendor/github.com/kr/text/wrap.go
create mode 100644 vendor/github.com/ligato/cn-infra/.gitignore
create mode 100644 vendor/github.com/ligato/cn-infra/.travis.yml
create mode 100644 vendor/github.com/ligato/cn-infra/CHANGELOG.md
create mode 100644 vendor/github.com/ligato/cn-infra/CONTRIBUTING.md
create mode 100644 vendor/github.com/ligato/cn-infra/Gopkg.lock
create mode 100644 vendor/github.com/ligato/cn-infra/Gopkg.toml
create mode 100644 vendor/github.com/ligato/cn-infra/Makefile
create mode 100644 vendor/github.com/ligato/cn-infra/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/grpcsync/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/grpcsync/grpc_watcher.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/grpcsync/msgservice.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/grpcsync/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/grpcsync/plugin_impl_grpsync.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/restsync/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/restsync/http_handlers.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/restsync/rest_watcher.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/syncbase/msg/change_event.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/syncbase/msg/datamsg.pb.go
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/syncbase/msg/datamsg.proto
create mode 100644 vendor/github.com/ligato/cn-infra/datasync/syncbase/msg/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/db/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.conf
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/plugin.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/consul/txn.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/etcd/mocks/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/etcd/mocks/embeded_etcd.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/redis/README.md
create mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_broker_impl.go
create mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_txn_impl.go
create mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/bytes_watcher_impl.go
create mode 100755 vendor/github.com/ligato/cn-infra/db/keyval/redis/config.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/redis/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/redis/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/keyval/redis/plugin_impl_redis.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_broker_impl.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_txn_impl.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassa_watcher_impl.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/cassandra.conf
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/config.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/plugin_impl_cassa.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/cassandra/query.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/plugin_api_sql.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/slice_utils.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_broker_api.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_expression.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_struct_metadata.go
create mode 100644 vendor/github.com/ligato/cn-infra/db/sql/sql_watcher_api.go
create mode 100644 vendor/github.com/ligato/cn-infra/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/Dockerfile
create mode 100644 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/README.md
create mode 100755 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/build-agent.sh
create mode 100755 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/build.sh
create mode 100644 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/etcd.conf
create mode 100644 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/kafka.conf
create mode 100755 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/shrink.sh
create mode 100644 vendor/github.com/ligato/cn-infra/docker/dev_cn_infra/supervisord.conf
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/CODINGSTYLE.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/CONFIG.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/DOCUMENTING.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/EXAMPLES.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/LOGGING.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/PLUGIN_DEPENDENCIES.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/PLUGIN_FLAVORS.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/PLUGIN_LIFECYCLE.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/SYSTEM_INTEGRATION.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/guidelines/TESTING.md
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/codecomplete_2nd_edition.jpg
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/datasync_pub.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/datasync_watch.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/db.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/flavors.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/grpc.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/high_level_arch_cninfra.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/http.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/idxmap_cache.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/idxmap_local.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/plugin_lifecycle.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/status_check.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/status_check_pull.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/imgs/status_check_push.png
create mode 100644 vendor/github.com/ligato/cn-infra/docs/readmes/cn_virtual_function.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/cassandra-lib/Readme.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/cassandra-lib/client-config.yaml
create mode 100644 vendor/github.com/ligato/cn-infra/examples/cassandra-lib/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/cassandra-lib/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/configs-plugin/Readme.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/configs-plugin/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/configs-plugin/example.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/configs-plugin/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/consul-lib/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/datasync-plugin/Readme.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/datasync-plugin/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/datasync-plugin/etcd.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/datasync-plugin/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/Makefile
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/Readme.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/editor/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/editor/editor.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/etcd.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/model/phonebook/config.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/model/phonebook/phonebook.pb.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/model/phonebook/phonebook.proto
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/view/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/view/view.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/watcher/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/etcd-lib/watcher/watcher.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/flags-lib/Readme.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/flags-lib/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/flags-lib/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/grpc-plugin/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/grpc-plugin/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/grpc-plugin/grpc-client/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/grpc-plugin/grpc-server/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/grpc-plugin/grpc-server/grpc.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/grpc-plugin/grpc-server/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/Makefile
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/asyncproducer/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/asyncproducer/asyncproducer.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/asyncproducer/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/consumer/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/consumer/consumer.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/consumer/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/mux/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/mux/config
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/mux/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/mux/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/syncproducer/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/syncproducer/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/syncproducer/syncproducer.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/utils/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-lib/utils/prompter.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/hash-partitioner/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/hash-partitioner/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/hash-partitioner/kafka.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/hash-partitioner/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/manual-partitioner/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/manual-partitioner/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/manual-partitioner/kafka.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/manual-partitioner/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/manual-partitioner/server.properties
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/post-init-consumer/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/post-init-consumer/kafka.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/kafka-plugin/post-init-consumer/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/Makefile
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/basic/REDME.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/basic/basic.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/basic/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/custom/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/custom/custom.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/custom/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/http/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/http/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-lib/http/server.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-plugin/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-plugin/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-plugin/logs.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/logs-plugin/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/model/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/model/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/model/example.pb.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/model/example.proto
create mode 100644 vendor/github.com/ligato/cn-infra/examples/prometheus-plugin/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/prometheus-plugin/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/Makefile
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/Readme.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/airport/airport.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/airport/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/airport/model/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/airport/model/flight.pb.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/airport/model/flight.proto
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/cluster-client.yaml
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/diagram.png
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/node-client.yaml
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/sentinel-client.yaml
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/simple/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-lib/simple/simple.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-plugin/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-plugin/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/redis-plugin/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/simple-agent/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/simple-agent/agent.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/simple-agent/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/statuscheck-plugin/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/examples/statuscheck-plugin/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/examples/statuscheck-plugin/etcd.conf
create mode 100644 vendor/github.com/ligato/cn-infra/examples/statuscheck-plugin/main.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/probe/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/probe/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/probe/plugin_impl_probe.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/probe/prometheus_probe.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/statuscheck/pluginstatusmap/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/health/statuscheck/pluginstatusmap/plugin_status_map.go
create mode 100644 vendor/github.com/ligato/cn-infra/logging/logmanager/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/logging/logmanager/doc.go
create mode 100644 vendor/github.com/ligato/cn-infra/logging/logmanager/options.go
create mode 100644 vendor/github.com/ligato/cn-infra/logging/logmanager/plugin_impl_log_manager.go
create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/README.md
create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/asyncproducer.go
create mode 100644
vendor/github.com/ligato/cn-infra/messaging/kafka/client/config.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/consumer.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/messages.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/mocks.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/client/syncproducer.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/kafka.conf create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/README.md create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/bytes_connection.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/chan.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/config.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/mock.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/multiplexer.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/mux/proto_connection.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/options.go create mode 100644 vendor/github.com/ligato/cn-infra/messaging/kafka/plugin_impl_kafka.go create mode 100644 vendor/github.com/ligato/cn-infra/rpc/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/rpc/rest/mock/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/rpc/rest/mock/httpmock.go create mode 100755 vendor/github.com/ligato/cn-infra/scripts/check_links.sh create mode 100755 vendor/github.com/ligato/cn-infra/scripts/gofmt.sh create mode 100755 vendor/github.com/ligato/cn-infra/scripts/static_analysis.sh create mode 100755 vendor/github.com/ligato/cn-infra/scripts/test_examples/docker_start_stop_functions.sh create mode 100755 vendor/github.com/ligato/cn-infra/scripts/test_examples/plugin_reconnect.sh create mode 100755 vendor/github.com/ligato/cn-infra/scripts/test_examples/test_examples.sh create mode 100644 vendor/github.com/ligato/cn-infra/utils/README.md create mode 100644 vendor/github.com/ligato/cn-infra/utils/clienttls/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/utils/clienttls/tlsutil.go create mode 100644 vendor/github.com/ligato/cn-infra/utils/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/utils/runtimeutils/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/utils/runtimeutils/runtimeutils.go create mode 100644 vendor/github.com/ligato/cn-infra/utils/structs/doc.go create mode 100644 vendor/github.com/ligato/cn-infra/utils/structs/structs_reflection.go create mode 100644 vendor/github.com/maraino/go-mock/.travis.yml create mode 100644 vendor/github.com/maraino/go-mock/AUTHORS create mode 100644 vendor/github.com/maraino/go-mock/LICENSE create mode 100644 vendor/github.com/maraino/go-mock/Makefile create mode 100644 vendor/github.com/maraino/go-mock/README.md create mode 100644 vendor/github.com/maraino/go-mock/mock.go create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go create mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml create mode 100644 
vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/pierrec/lz4/.gitignore create mode 100644 vendor/github.com/pierrec/lz4/.travis.yml create mode 100644 vendor/github.com/pierrec/lz4/LICENSE create mode 100644 vendor/github.com/pierrec/lz4/README.md create mode 100644 vendor/github.com/pierrec/lz4/block.go create mode 100644 vendor/github.com/pierrec/lz4/debug.go create mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go create mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go create mode 100644 vendor/github.com/pierrec/lz4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go create mode 100644 vendor/github.com/pierrec/lz4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/writer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/.gitignore create mode 100644 vendor/github.com/rcrowley/go-metrics/.travis.yml create mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE create mode 100644 vendor/github.com/rcrowley/go-metrics/README.md create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100755 vendor/github.com/rcrowley/go-metrics/validate.sh create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go create mode 100644 vendor/github.com/willfaught/gockle/.travis.yml create mode 100644 vendor/github.com/willfaught/gockle/batch.go create mode 100644 vendor/github.com/willfaught/gockle/doc.go create mode 100644 vendor/github.com/willfaught/gockle/iterator.go create mode 100644 vendor/github.com/willfaught/gockle/license.md create mode 100644 vendor/github.com/willfaught/gockle/readme.md create mode 100644 
vendor/github.com/willfaught/gockle/session.go create mode 100644 vendor/gopkg.in/inf.v0/LICENSE create mode 100644 vendor/gopkg.in/inf.v0/dec.go create mode 100644 vendor/gopkg.in/inf.v0/rounder.go diff --git a/Gopkg.lock b/Gopkg.lock index 007dc509d9..b3e7e90542 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -37,6 +37,15 @@ packages = ["."] revision = "cd527374f1e5bff4938207604a14f2e38a9cf512" +[[projects]] + name = "github.com/Shopify/sarama" + packages = [ + ".", + "mocks" + ] + revision = "35324cf48e33d8260e1c7c18854465a904ade249" + version = "v1.17.0" + [[projects]] branch = "master" name = "github.com/bennyscetbun/jsongo" @@ -49,6 +58,12 @@ packages = ["quantile"] revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" +[[projects]] + name = "github.com/bsm/sarama-cluster" + packages = ["."] + revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3" + version = "v2.1.13" + [[projects]] branch = "master" name = "github.com/buger/goterm" @@ -75,6 +90,12 @@ revision = "66722b1ada68fcd5227db853ee92003169a975c8" version = "v3.2.0" +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + [[projects]] name = "github.com/docker/docker" packages = [ @@ -118,6 +139,24 @@ revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52" version = "v0.3.2" +[[projects]] + name = "github.com/eapache/go-resiliency" + packages = ["breaker"] + revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/eapache/go-xerial-snappy" + packages = ["."] + revision = "040cc1a32f578808623071247fdbd5cc43f37f5f" + +[[projects]] + name = "github.com/eapache/queue" + packages = ["."] + revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" + version = "v1.1.0" + [[projects]] name = "github.com/elazarl/go-bindata-assetfs" packages = ["."] @@ -141,6 +180,32 @@ revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" +[[projects]] + name = "github.com/go-redis/redis" + packages = [ + ".", + "internal", + "internal/consistenthash", + "internal/hashtag", + "internal/pool", + "internal/proto", + "internal/singleflight", + "internal/util" + ] + revision = "480db94d33e6088e08d628833b6c0705451d24bb" + version = "v6.13.2" + +[[projects]] + branch = "master" + name = "github.com/gocql/gocql" + packages = [ + ".", + "internal/lru", + "internal/murmur", + "internal/streams" + ] + revision = "e06f8c1bcd787e6bf0608288b314522f08cc7848" + [[projects]] name = "github.com/gogo/protobuf" packages = [ @@ -179,6 +244,12 @@ packages = ["proto"] revision = "c65a0412e71e8b9b3bfd22925720d23c0f054237" +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + [[projects]] name = "github.com/gorilla/context" packages = ["."] @@ -191,18 +262,60 @@ revision = "53c1911da2b537f792e7cafcb446b05ffe33b996" version = "v1.6.1" +[[projects]] + branch = "master" + name = "github.com/hailocab/go-hostpool" + packages = ["."] + revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" + +[[projects]] + name = "github.com/hashicorp/consul" + packages = ["api"] + revision = "39f93f011e591c842acc8053a7f5972aa6e592fd" + version = "v1.2.1" + [[projects]] branch = "master" name = "github.com/hashicorp/go-cleanhttp" packages = ["."] revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-rootcerts" + packages = ["."] + revision = 
"6bb64b370b90e7ef1fa532be9e591a81c3493e00" + +[[projects]] + name = "github.com/hashicorp/serf" + packages = ["coordinate"] + revision = "d6574a5bb1226678d7010325fb6c985db20ee458" + version = "v0.8.1" + +[[projects]] + branch = "master" + name = "github.com/howeyc/crc16" + packages = ["."] + revision = "2b2a61e366a66d3efb279e46176e7291001e0354" + [[projects]] name = "github.com/inconshreveable/mousetrap" packages = ["."] revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" +[[projects]] + name = "github.com/kr/pretty" + packages = ["."] + revision = "73f6ac0b30a98e433b289500d779f50c1a6f0712" + version = "v0.1.0" + +[[projects]] + name = "github.com/kr/text" + packages = ["."] + revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f" + version = "v0.1.0" + [[projects]] name = "github.com/ligato/cn-infra" packages = [ @@ -215,24 +328,35 @@ "datasync/resync", "datasync/syncbase", "db/keyval", + "db/keyval/consul", "db/keyval/etcd", "db/keyval/kvproto", + "db/keyval/redis", + "db/sql", + "db/sql/cassandra", + "health/probe", "health/statuscheck", "health/statuscheck/model/status", "idxmap", "idxmap/mem", "infra", "logging", + "logging/logmanager", "logging/logrus", "logging/measure", "messaging", + "messaging/kafka", + "messaging/kafka/client", + "messaging/kafka/mux", "rpc/grpc", "rpc/prometheus", "rpc/rest", "servicelabel", "utils/addrs", + "utils/clienttls", "utils/once", - "utils/safeclose" + "utils/safeclose", + "utils/structs" ] revision = "52432e9cff91cd15e5cd0001be7e1034253adf0a" @@ -248,12 +372,30 @@ packages = ["."] revision = "ef56447db6a068ad9e52bc54a1aff5fb9e1ed2dd" +[[projects]] + branch = "master" + name = "github.com/maraino/go-mock" + packages = ["."] + revision = "4c74c434cd3a9e9a70ed1eeb56646a1d3fac372f" + [[projects]] name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" version = "v1.0.0" +[[projects]] + branch = "master" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" + [[projects]] name = "github.com/namsral/flag" packages = ["."] @@ -303,6 +445,15 @@ revision = "baf6536d6259209c3edfa2b22237af82942d3dfa" version = "v0.1.1" +[[projects]] + name = "github.com/pierrec/lz4" + packages = [ + ".", + "internal/xxh32" + ] + revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00" + version = "v2.0.3" + [[projects]] name = "github.com/pkg/errors" packages = ["."] @@ -345,6 +496,12 @@ ] revision = "85fadb6e89903ef7cca6f6a804474cd5ea85b6e1" +[[projects]] + branch = "master" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + revision = "e2704e165165ec55d062f5919b4b29494e9fa790" + [[projects]] name = "github.com/satori/go.uuid" packages = ["."] @@ -396,6 +553,12 @@ packages = ["."] revision = "be1fbeda19366dea804f00efff2dd73a1642fdcc" +[[projects]] + branch = "master" + name = "github.com/willfaught/gockle" + packages = ["."] + revision = "4f254e1e0f0a12485963192ff605f61f1933e71f" + [[projects]] branch = "master" name = "golang.org/x/crypto" @@ -481,6 +644,12 @@ revision = "8050b9cbc271307e5a716a9d782803d09b0d6f2d" version = "v1.2.1" +[[projects]] + name = "gopkg.in/inf.v0" + packages = ["."] + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + [[projects]] branch = "v2" name = "gopkg.in/yaml.v2" @@ -490,6 +659,6 @@ [solve-meta] 
analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "14676c37735f9da673c5b437ebe2f1ddabdd1764b2e0bc2a98b756ec37073352" + inputs-digest = "b1fe4364c3164014bab7d205890413926c9768bcebe275b248ece2222295592e" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index ea3908f531..b678a332e6 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -105,3 +105,7 @@ required = [ [prune] go-tests = true unused-packages = true + + [[prune.project]] + name = "github.com/ligato/cn-infra" + unused-packages = false diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore new file mode 100644 index 0000000000..c6c482dca8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.test + +# Folders +_obj +_test +.vagrant + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +coverage.txt diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml new file mode 100644 index 0000000000..ea295ec5f0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.travis.yml @@ -0,0 +1,36 @@ +language: go +go: +- 1.8.x +- 1.9.x +- 1.10.x + +env: + global: + - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 + - TOXIPROXY_ADDR=http://localhost:8474 + - KAFKA_INSTALL_ROOT=/home/travis/kafka + - KAFKA_HOSTNAME=localhost + - DEBUG=true + matrix: + - KAFKA_VERSION=0.11.0.2 + - KAFKA_VERSION=1.0.0 + - KAFKA_VERSION=1.1.0 + +before_install: +- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} +- vagrant/install_cluster.sh +- vagrant/boot_cluster.sh +- vagrant/create_topics.sh + +install: make install_dependencies + +script: +- make test +- make vet +- make errcheck +- make fmt + +after_success: +- bash <(curl -s https://codecov.io/bash) + +after_script: vagrant/halt_cluster.sh diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md new file mode 100644 index 0000000000..16d5829c99 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -0,0 +1,541 @@ +# Changelog + +#### Version 1.17.0 (2018-05-30) + +New Features: + - Add support for gzip compression levels + ([#1044](https://github.com/Shopify/sarama/pull/1044)). + - Add support for Metadata request/response pairs versions v1 to v5 + ([#1047](https://github.com/Shopify/sarama/pull/1047), + [#1069](https://github.com/Shopify/sarama/pull/1069)). + - Add versioning to JoinGroup request/response pairs + ([#1098](https://github.com/Shopify/sarama/pull/1098)) + - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs + ([#1065](https://github.com/Shopify/sarama/pull/1065), + [#1096](https://github.com/Shopify/sarama/pull/1096), + [#1027](https://github.com/Shopify/sarama/pull/1027)). + - Add `Controller()` method to Client interface + ([#1063](https://github.com/Shopify/sarama/pull/1063)). + +Improvements: + - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp + ([#1010](https://github.com/Shopify/sarama/pull/1010)). + - Expose missing protocol parts: `msgSet` and `recordBatch` + ([#1049](https://github.com/Shopify/sarama/pull/1049)). + - Add support for v1 DeleteTopics Request + ([#1052](https://github.com/Shopify/sarama/pull/1052)). 
+ - Add support for Go 1.10 + ([#1064](https://github.com/Shopify/sarama/pull/1064)). + - Claim support for Kafka 1.1.0 + ([#1073](https://github.com/Shopify/sarama/pull/1073)). + +Bug Fixes: + - Fix FindCoordinatorResponse.encode to allow nil Coordinator + ([#1050](https://github.com/Shopify/sarama/pull/1050), + [#1051](https://github.com/Shopify/sarama/pull/1051)). + - Clear all metadata when we have the latest topic info + ([#1033](https://github.com/Shopify/sarama/pull/1033)). + - Make `PartitionConsumer.Close` idempotent + ([#1092](https://github.com/Shopify/sarama/pull/1092)). + +#### Version 1.16.0 (2018-02-12) + +New Features: + - Add support for the Create/Delete Topics request/response pairs + ([#1007](https://github.com/Shopify/sarama/pull/1007), + [#1008](https://github.com/Shopify/sarama/pull/1008)). + - Add support for the Describe/Create/Delete ACL request/response pairs + ([#1009](https://github.com/Shopify/sarama/pull/1009)). + - Add support for the five transaction-related request/response pairs + ([#1016](https://github.com/Shopify/sarama/pull/1016)). + +Improvements: + - Permit setting version on mock producer responses + ([#999](https://github.com/Shopify/sarama/pull/999)). + - Add `NewMockBrokerListener` helper for testing TLS connections + ([#1019](https://github.com/Shopify/sarama/pull/1019)). + - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB + which results in much higher throughput in most cases + ([#1024](https://github.com/Shopify/sarama/pull/1024)). + - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to + reduce CPU and memory usage when processing many partitions + ([#1028](https://github.com/Shopify/sarama/pull/1028)). + - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/Shopify/sarama/pull/1002), + [#1015](https://github.com/Shopify/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/Shopify/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/Shopify/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/Shopify/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/Shopify/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/Shopify/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/Shopify/sarama/pull/1035)). + +#### Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/Shopify/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/Shopify/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/Shopify/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/Shopify/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/Shopify/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/Shopify/sarama/pull/990)). 
+ - Fix producing with multiple headers + ([#996](https://github.com/Shopify/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/Shopify/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/Shopify/sarama/pull/991)). + +#### Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/Shopify/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/Shopify/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/Shopify/sarama/pull/975)). + +#### Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/Shopify/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/Shopify/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/Shopify/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/Shopify/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/Shopify/sarama/issues/885)). + - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/Shopify/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/Shopify/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/Shopify/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/Shopify/sarama/pull/940)). + +#### Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/Shopify/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/Shopify/sarama/pull/837), + [#841](https://github.com/Shopify/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/Shopify/sarama/pull/859)). 
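The `NewCustomHashPartitioner` note above translates to a one-line config change. A minimal sketch, not part of this patch, assuming CRC-32 is the desired replacement for the default FNV-1a key hashing:

```go
package main

import (
	"hash"
	"hash/crc32"

	"github.com/Shopify/sarama"
)

func newConfig() *sarama.Config {
	config := sarama.NewConfig()
	// Swap the default FNV-1a key hashing for CRC-32; any func() hash.Hash32
	// can be plugged in here.
	config.Producer.Partitioner = sarama.NewCustomHashPartitioner(func() hash.Hash32 {
		return crc32.NewIEEE()
	})
	return config
}
```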
+ +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/Shopify/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/Shopify/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/Shopify/sarama/pull/859)). + +#### Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/Shopify/sarama/pull/701), + [#746](https://github.com/Shopify/sarama/pull/746), + [#766](https://github.com/Shopify/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/Shopify/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/Shopify/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/Shopify/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/Shopify/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/Shopify/sarama/pull/756)). + - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/Shopify/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/Shopify/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/Shopify/sarama/pull/795)). + +#### Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/Shopify/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/Shopify/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/Shopify/sarama/pull/730), + [#733](https://github.com/Shopify/sarama/pull/733), + [#734](https://github.com/Shopify/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/Shopify/sarama/pull/735)). + +#### Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and +[#713](https://github.com/Shopify/sarama/pull/713)). 
If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/Shopify/sarama/pull/672), + [#678](https://github.com/Shopify/sarama/pull/678), + [#681](https://github.com/Shopify/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/Shopify/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/Shopify/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/Shopify/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/Shopify/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/Shopify/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/Shopify/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/Shopify/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/Shopify/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/Shopify/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/Shopify/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/Shopify/sarama/pull/709)). + +#### Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/Shopify/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/Shopify/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/Shopify/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/Shopify/sarama/pull/648)). + +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/Shopify/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/Shopify/sarama/pull/605), + [#621](https://github.com/Shopify/sarama/pull/621), + [#654](https://github.com/Shopify/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/Shopify/sarama/pull/658)). + +#### Version 1.8.0 (2016-02-01) + +New Features: + - Full support for Kafka 0.9: + - All protocol messages and fields + ([#586](https://github.com/Shopify/sarama/pull/586), + [#588](https://github.com/Shopify/sarama/pull/588), + [#590](https://github.com/Shopify/sarama/pull/590)). + - Verified that TLS support works + ([#581](https://github.com/Shopify/sarama/pull/581)). + - Fixed the OffsetManager compatibility + ([#585](https://github.com/Shopify/sarama/pull/585)). + +Improvements: + - Optimize for fewer system calls when reading from the network + ([#584](https://github.com/Shopify/sarama/pull/584)). 
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour + ([#589](https://github.com/Shopify/sarama/pull/589)). + +#### Version 1.7.0 (2015-12-11) + +New Features: + - Preliminary support for Kafka 0.9 + ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several + caveats: + - Protocol-layer support is mostly in place + ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 + renamed some messages and fields, which we did not in order to preserve API + compatibility. + - The producer and consumer work against 0.9, but the offset manager does + not ([#573](https://github.com/Shopify/sarama/pull/573)). + - TLS support may or may not work + ([#581](https://github.com/Shopify/sarama/pull/581)). + +Improvements: + - Don't wait for request timeouts on dead brokers, greatly speeding recovery + when the TCP connection is left hanging + ([#548](https://github.com/Shopify/sarama/pull/548)). + - Refactored part of the producer. The new version provides a much more elegant + solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also + slightly more efficient, and much more precise in calculating batch sizes + when compression is used + ([#549](https://github.com/Shopify/sarama/pull/549), + [#550](https://github.com/Shopify/sarama/pull/550), + [#551](https://github.com/Shopify/sarama/pull/551)). + +Bug Fixes: + - Fix race condition in consumer test mock + ([#553](https://github.com/Shopify/sarama/pull/553)). + +#### Version 1.6.1 (2015-09-25) + +Bug Fixes: + - Fix panic that could occur if a user-supplied message value failed to encode + ([#449](https://github.com/Shopify/sarama/pull/449)). + +#### Version 1.6.0 (2015-09-04) + +New Features: + - Implementation of a consumer offset manager using the APIs introduced in + Kafka 0.8.2. The API is designed mainly for integration into a future + high-level consumer, not for direct use, although it is *possible* to use it + directly. + ([#461](https://github.com/Shopify/sarama/pull/461)). + +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/Shopify/sarama/pull/255)). + +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), + [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways + ([#528](https://github.com/Shopify/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/Shopify/sarama/pull/529)). + +#### Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/Shopify/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/Shopify/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/Shopify/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/Shopify/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/Shopify/sarama/pull/475)). 
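The TLS support introduced in 1.5.0 is driven entirely by configuration. A minimal sketch of enabling it through the `Net.TLS` fields of the config; the `tls.Config` contents are placeholders, and real code would load certificates here:

```go
package main

import (
	"crypto/tls"

	"github.com/Shopify/sarama"
)

func newTLSConfig() *sarama.Config {
	config := sarama.NewConfig()
	// Turn on TLS for broker connections and supply a standard-library
	// tls.Config; certificate loading is omitted in this sketch.
	config.Net.TLS.Enable = true
	config.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}
	return config
}
```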
+ +#### Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/Shopify/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). + +#### Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/Shopify/sarama/pull/456)). + +#### Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/Shopify/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/Shopify/sarama/pull/450), + [#451](https://github.com/Shopify/sarama/pull/451)). + +#### Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/Shopify/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/Shopify/sarama/pull/439), + [#442](https://github.com/Shopify/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/Shopify/sarama/pull/429)). + - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/Shopify/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/Shopify/sarama/pull/325)). + +#### Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/Shopify/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. 
+ Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/Shopify/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/Shopify/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/Shopify/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/Shopify/sarama/pull/422)). + +#### Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/Shopify/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/Shopify/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/Shopify/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/Shopify/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/Shopify/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/Shopify/sarama/pull/390), + [#400](https://github.com/Shopify/sarama/pull/400)). + +#### Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/Shopify/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/Shopify/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/Shopify/sarama/pull/369)). + - Fix a condition where the producer's internal control messages could have + gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). + - Fix an issue where invalid partition lists would be cached when asking for + metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)). + + +#### Version 1.0.0 (2015-03-17) + +Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: + +- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. +- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. +- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. +- For most use cases, it is no longer necessary to open a `Client`; this will be done for you. +- All the configuration values have been unified in the `Config` struct. +- Much improved test suite.
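The two _Important_ notes in the changelog above (setting `config.Version` as of 1.10, and `Producer.Return.Successes = true` for the SyncProducer as of 1.11) combine into a few lines of setup. A minimal sketch, assuming a placeholder broker at `localhost:9092` and a hypothetical topic name:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// Tell Sarama which Kafka version it talks to (required since 1.10)...
	config.Version = sarama.V0_11_0_0
	// ...and enable Return.Successes, required by the SyncProducer since 1.11.
	config.Producer.Return.Successes = true

	// "localhost:9092" and "example-topic" are placeholders.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}
```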
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE new file mode 100644 index 0000000000..d2bf4352f4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Shopify + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile new file mode 100644 index 0000000000..b9a453dd29 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -0,0 +1,30 @@ +default: fmt vet errcheck test + +# Taken from https://github.com/codecov/example-go#caveat-multiple-files +test: + echo "" > coverage.txt + for d in `go list ./... | grep -v vendor`; do \ + go test -p 1 -v -timeout 90s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi \ + done + +vet: + go vet ./... + +# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg +errcheck: + errcheck -ignorepkg fmt github.com/Shopify/sarama/... + +fmt: + @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi + +install_dependencies: install_errcheck get + +install_errcheck: + go get github.com/kisielk/errcheck + +get: + go get -t diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md new file mode 100644 index 0000000000..4fc0cc600f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/README.md @@ -0,0 +1,39 @@ +sarama +====== + +[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) +[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) +[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) + +Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). + +### Getting started + +- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). +- Mocks for testing are available in the [mocks](./mocks) subpackage. +- The [examples](./examples) directory contains more elaborate example applications. +- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. 
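The mocks subpackage mentioned under Getting started follows an expectation-based pattern: declare what the producer should be asked to do, then exercise the code under test. A minimal sketch of a test against the mock `SyncProducer`; the topic name is a placeholder:

```go
package example

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestProduce(t *testing.T) {
	// Declare the expectation up front; a nil config uses the mock's default.
	producer := mocks.NewSyncProducer(t, nil)
	producer.ExpectSendMessageAndSucceed()

	_, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic", // placeholder topic
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		t.Fatal(err)
	}
	// Close verifies that every declared expectation was consumed.
	if err := producer.Close(); err != nil {
		t.Fatal(err)
	}
}
```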
+ +You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). + +### Compatibility and API stability + +Sarama provides a "2 releases + 2 months" compatibility guarantee: we support +the two latest stable releases of Kafka and Go, and we provide a two month +grace period for older releases. This means we currently officially support +Go 1.8 through 1.10, and Kafka 0.11 through 1.1, although older releases are +still likely to work. + +Sarama follows semantic versioning and provides API stability via the gopkg.in service. +You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +A changelog is available [here](CHANGELOG.md). + +### Contributing + +* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). +* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more + technical and design details. +* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) + contains a wealth of useful information. +* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. +* If you have any questions, just ask! diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile new file mode 100644 index 0000000000..f4b848a301 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/Vagrantfile @@ -0,0 +1,20 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB +MEMORY = 3072 + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "ubuntu/trusty64" + + config.vm.provision :shell, path: "vagrant/provision.sh" + + config.vm.network "private_network", ip: "192.168.100.67" + + config.vm.provider "virtualbox" do |v| + v.memory = MEMORY + end +end diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go new file mode 100644 index 0000000000..51517359ab --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_bindings.go @@ -0,0 +1,119 @@ +package sarama + +type Resource struct { + ResourceType AclResourceType + ResourceName string +} + +func (r *Resource) encode(pe packetEncoder) error { + pe.putInt8(int8(r.ResourceType)) + + if err := pe.putString(r.ResourceName); err != nil { + return err + } + + return nil +} + +func (r *Resource) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + r.ResourceType = AclResourceType(resourceType) + + if r.ResourceName, err = pd.getString(); err != nil { + return err + } + + return nil +} + +type Acl struct { + Principal string + Host string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *Acl) encode(pe packetEncoder) error { + if err := pe.putString(a.Principal); err != nil { + return err + } + + if err := pe.putString(a.Host); err != nil { + return err + } + + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *Acl) decode(pd packetDecoder, version int16) (err error) { + if a.Principal, err = pd.getString(); err != nil { + return err + } + + if a.Host, err = pd.getString(); err != nil { + return err + } + + operation, err 
:= pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} + +type ResourceAcls struct { + Resource + Acls []*Acl +} + +func (r *ResourceAcls) encode(pe packetEncoder) error { + if err := r.Resource.encode(pe); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Acls)); err != nil { + return err + } + for _, acl := range r.Acls { + if err := acl.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *ResourceAcls) decode(pd packetDecoder, version int16) error { + if err := r.Resource.decode(pd, version); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Acls = make([]*Acl, n) + for i := 0; i < n; i++ { + r.Acls[i] = new(Acl) + if err := r.Acls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go new file mode 100644 index 0000000000..0b6ecbec3e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -0,0 +1,76 @@ +package sarama + +type CreateAclsRequest struct { + AclCreations []*AclCreation +} + +func (c *CreateAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.AclCreations)); err != nil { + return err + } + + for _, aclCreation := range c.AclCreations { + if err := aclCreation.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreations = make([]*AclCreation, n) + + for i := 0; i < n; i++ { + c.AclCreations[i] = new(AclCreation) + if err := c.AclCreations[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *CreateAclsRequest) key() int16 { + return 30 +} + +func (d *CreateAclsRequest) version() int16 { + return 0 +} + +func (d *CreateAclsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type AclCreation struct { + Resource + Acl +} + +func (a *AclCreation) encode(pe packetEncoder) error { + if err := a.Resource.encode(pe); err != nil { + return err + } + if err := a.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) { + if err := a.Resource.decode(pd, version); err != nil { + return err + } + if err := a.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go new file mode 100644 index 0000000000..8a56f35735 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -0,0 +1,88 @@ +package sarama + +import "time" + +type CreateAclsResponse struct { + ThrottleTime time.Duration + AclCreationResponses []*AclCreationResponse +} + +func (c *CreateAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil { + return err + } + + for _, aclCreationResponse := range c.AclCreationResponses { + if err := aclCreationResponse.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsResponse) decode(pd packetDecoder, 
version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreationResponses = make([]*AclCreationResponse, n) + for i := 0; i < n; i++ { + c.AclCreationResponses[i] = new(AclCreationResponse) + if err := c.AclCreationResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *CreateAclsResponse) key() int16 { + return 30 +} + +func (d *CreateAclsResponse) version() int16 { + return 0 +} + +func (d *CreateAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type AclCreationResponse struct { + Err KError + ErrMsg *string +} + +func (a *AclCreationResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(a.Err)) + + if err := pe.putNullableString(a.ErrMsg); err != nil { + return err + } + + return nil +} + +func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + if a.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go new file mode 100644 index 0000000000..4133dceab7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -0,0 +1,48 @@ +package sarama + +type DeleteAclsRequest struct { + Filters []*AclFilter +} + +func (d *DeleteAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Filters)); err != nil { + return err + } + + for _, filter := range d.Filters { + if err := filter.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.Filters = make([]*AclFilter, n) + for i := 0; i < n; i++ { + d.Filters[i] = new(AclFilter) + if err := d.Filters[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) key() int16 { + return 31 +} + +func (d *DeleteAclsRequest) version() int16 { + return 0 +} + +func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go new file mode 100644 index 0000000000..b5e1c45eb5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -0,0 +1,155 @@ +package sarama + +import "time" + +type DeleteAclsResponse struct { + ThrottleTime time.Duration + FilterResponses []*FilterResponse +} + +func (a *DeleteAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(a.FilterResponses)); err != nil { + return err + } + + for _, filterResponse := range a.FilterResponses { + if err := filterResponse.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + a.FilterResponses = make([]*FilterResponse, n) + + for i := 0; i < n; i++ { + a.FilterResponses[i] = 
new(FilterResponse) + if err := a.FilterResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsResponse) key() int16 { + return 31 +} + +func (d *DeleteAclsResponse) version() int16 { + return 0 +} + +func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type FilterResponse struct { + Err KError + ErrMsg *string + MatchingAcls []*MatchingAcl +} + +func (f *FilterResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(f.Err)) + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil { + return err + } + for _, matchingAcl := range f.MatchingAcls { + if err := matchingAcl.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(kerr) + + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + f.MatchingAcls = make([]*MatchingAcl, n) + for i := 0; i < n; i++ { + f.MatchingAcls[i] = new(MatchingAcl) + if err := f.MatchingAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +type MatchingAcl struct { + Err KError + ErrMsg *string + Resource + Acl +} + +func (m *MatchingAcl) encode(pe packetEncoder) error { + pe.putInt16(int16(m.Err)) + if err := pe.putNullableString(m.ErrMsg); err != nil { + return err + } + + if err := m.Resource.encode(pe); err != nil { + return err + } + + if err := m.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + m.Err = KError(kerr) + + if m.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + if err := m.Resource.decode(pd, version); err != nil { + return err + } + + if err := m.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go new file mode 100644 index 0000000000..02a5a1f0e2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -0,0 +1,25 @@ +package sarama + +type DescribeAclsRequest struct { + AclFilter +} + +func (d *DescribeAclsRequest) encode(pe packetEncoder) error { + return d.AclFilter.encode(pe) +} + +func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) { + return d.AclFilter.decode(pd, version) +} + +func (d *DescribeAclsRequest) key() int16 { + return 29 +} + +func (d *DescribeAclsRequest) version() int16 { + return 0 +} + +func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go new file mode 100644 index 0000000000..5bc9497f4c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -0,0 +1,80 @@ +package sarama + +import "time" + +type DescribeAclsResponse struct { + ThrottleTime time.Duration + Err KError + ErrMsg *string + ResourceAcls []*ResourceAcls +} + +func (d *DescribeAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(d.Err)) + + if err := 
pe.putNullableString(d.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil { + return err + } + + for _, resourceAcl := range d.ResourceAcls { + if err := resourceAcl.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + d.Err = KError(kerr) + + errmsg, err := pd.getString() + if err != nil { + return err + } + if errmsg != "" { + d.ErrMsg = &errmsg + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.ResourceAcls = make([]*ResourceAcls, n) + + for i := 0; i < n; i++ { + d.ResourceAcls[i] = new(ResourceAcls) + if err := d.ResourceAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) key() int16 { + return 29 +} + +func (d *DescribeAclsResponse) version() int16 { + return 0 +} + +func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go new file mode 100644 index 0000000000..9706354219 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_filter.go @@ -0,0 +1,61 @@ +package sarama + +type AclFilter struct { + ResourceType AclResourceType + ResourceName *string + Principal *string + Host *string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *AclFilter) encode(pe packetEncoder) error { + pe.putInt8(int8(a.ResourceType)) + if err := pe.putNullableString(a.ResourceName); err != nil { + return err + } + if err := pe.putNullableString(a.Principal); err != nil { + return err + } + if err := pe.putNullableString(a.Host); err != nil { + return err + } + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + a.ResourceType = AclResourceType(resourceType) + + if a.ResourceName, err = pd.getNullableString(); err != nil { + return err + } + + if a.Principal, err = pd.getNullableString(); err != nil { + return err + } + + if a.Host, err = pd.getNullableString(); err != nil { + return err + } + + operation, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go new file mode 100644 index 0000000000..19da6f2f45 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_types.go @@ -0,0 +1,42 @@ +package sarama + +type AclOperation int + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java +const ( + AclOperationUnknown AclOperation = 0 + AclOperationAny AclOperation = 1 + AclOperationAll AclOperation = 2 + AclOperationRead AclOperation = 3 + AclOperationWrite AclOperation = 4 + AclOperationCreate AclOperation = 5 + AclOperationDelete AclOperation = 6 + AclOperationAlter AclOperation = 7 + AclOperationDescribe AclOperation = 8 + AclOperationClusterAction AclOperation = 9 + 
AclOperationDescribeConfigs AclOperation = 10 + AclOperationAlterConfigs AclOperation = 11 + AclOperationIdempotentWrite AclOperation = 12 +) + +type AclPermissionType int + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java +const ( + AclPermissionUnknown AclPermissionType = 0 + AclPermissionAny AclPermissionType = 1 + AclPermissionDeny AclPermissionType = 2 + AclPermissionAllow AclPermissionType = 3 +) + +type AclResourceType int + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java +const ( + AclResourceUnknown AclResourceType = 0 + AclResourceAny AclResourceType = 1 + AclResourceTopic AclResourceType = 2 + AclResourceGroup AclResourceType = 3 + AclResourceCluster AclResourceType = 4 + AclResourceTransactionalID AclResourceType = 5 +) diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go new file mode 100644 index 0000000000..6da166c634 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -0,0 +1,52 @@ +package sarama + +type AddOffsetsToTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + GroupID string +} + +func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + if err := pe.putString(a.GroupID); err != nil { + return err + } + + return nil +} + +func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.GroupID, err = pd.getString(); err != nil { + return err + } + return nil +} + +func (a *AddOffsetsToTxnRequest) key() int16 { + return 25 +} + +func (a *AddOffsetsToTxnRequest) version() int16 { + return 0 +} + +func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go new file mode 100644 index 0000000000..3a46151a05 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go @@ -0,0 +1,44 @@ +package sarama + +import ( + "time" +) + +type AddOffsetsToTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(a.Err)) + return nil +} + +func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + return nil +} + +func (a *AddOffsetsToTxnResponse) key() int16 { + return 25 +} + +func (a *AddOffsetsToTxnResponse) version() int16 { + return 0 +} + +func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go new file mode 
100644 index 0000000000..a8a59225e4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go @@ -0,0 +1,76 @@ +package sarama + +type AddPartitionsToTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TopicPartitions map[string][]int32 +} + +func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + pe.putInt64(a.ProducerID) + pe.putInt16(a.ProducerEpoch) + + if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil { + return err + } + for topic, partitions := range a.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + return nil +} + +func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + a.TopicPartitions = make(map[string][]int32) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + + a.TopicPartitions[topic] = partitions + } + + return nil +} + +func (a *AddPartitionsToTxnRequest) key() int16 { + return 24 +} + +func (a *AddPartitionsToTxnRequest) version() int16 { + return 0 +} + +func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go new file mode 100644 index 0000000000..581c556c5c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -0,0 +1,108 @@ +package sarama + +import ( + "time" +) + +type AddPartitionsToTxnResponse struct { + ThrottleTime time.Duration + Errors map[string][]*PartitionError +} + +func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(a.Errors)); err != nil { + return err + } + + for topic, e := range a.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + a.Errors[topic][j] = new(PartitionError) + if err := a.Errors[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) key() int16 { + return 24 +} + +func (a *AddPartitionsToTxnResponse) version() int16 { + return 0 +} + 
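+// exampleAddPartitionsToTxn is an illustrative sketch added alongside this
+// vendored file, not upstream sarama code: it shows how a transactional
+// client might register partitions with its coordinator before producing to
+// them. The coordinator *Broker is assumed to be already connected; the
+// AddPartitionsToTxn helper it calls is defined in broker.go below.
+func exampleAddPartitionsToTxn(coordinator *Broker, pid int64, epoch int16) error {
+	req := &AddPartitionsToTxnRequest{
+		TransactionalID: "example-txn",
+		ProducerID:      pid,
+		ProducerEpoch:   epoch,
+		TopicPartitions: map[string][]int32{"events": {0, 1}},
+	}
+	resp, err := coordinator.AddPartitionsToTxn(req)
+	if err != nil {
+		return err
+	}
+	// Every partition reports its own KError; anything other than ErrNoError
+	// means that partition was not added to the transaction.
+	for _, partitionErrors := range resp.Errors {
+		for _, pErr := range partitionErrors {
+			if pErr.Err != ErrNoError {
+				return pErr.Err
+			}
+		}
+	}
+	return nil
+}
+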
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionError struct { + Partition int32 + Err KError +} + +func (p *PartitionError) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt16(int16(p.Err)) + return nil +} + +func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + p.Err = KError(kerr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go new file mode 100644 index 0000000000..48c44ead67 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -0,0 +1,120 @@ +package sarama + +type AlterConfigsRequest struct { + Resources []*AlterConfigsResource + ValidateOnly bool +} + +type AlterConfigsResource struct { + Type ConfigResourceType + Name string + ConfigEntries map[string]*string +} + +func (acr *AlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(acr.Resources)); err != nil { + return err + } + + for _, r := range acr.Resources { + if err := r.encode(pe); err != nil { + return err + } + } + + pe.putBool(acr.ValidateOnly) + return nil +} + +func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { + resourceCount, err := pd.getArrayLength() + if err != nil { + return err + } + + acr.Resources = make([]*AlterConfigsResource, resourceCount) + for i := range acr.Resources { + r := &AlterConfigsResource{} + err = r.decode(pd, version) + if err != nil { + return err + } + acr.Resources[i] = r + } + + validateOnly, err := pd.getBool() + if err != nil { + return err + } + + acr.ValidateOnly = validateOnly + + return nil +} + +func (ac *AlterConfigsResource) encode(pe packetEncoder) error { + pe.putInt8(int8(ac.Type)) + + if err := pe.putString(ac.Name); err != nil { + return err + } + + if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range ac.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + ac.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + ac.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + ac.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + return err +} + +func (acr *AlterConfigsRequest) key() int16 { + return 33 +} + +func (acr *AlterConfigsRequest) version() int16 { + return 0 +} + +func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go new file mode 100644 index 0000000000..29b09e1ff8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -0,0 +1,95 @@ +package sarama + +import "time" + +type AlterConfigsResponse struct { + ThrottleTime time.Duration + Resources 
[]*AlterConfigsResourceResponse
+}
+
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(ct.Resources)); err != nil {
+		return err
+	}
+
+	for i := range ct.Resources {
+		pe.putInt16(ct.Resources[i].ErrorCode)
+		err := pe.putString(ct.Resources[i].ErrorMsg)
+		if err != nil {
+			return err
+		}
+		pe.putInt8(int8(ct.Resources[i].Type))
+		err = pe.putString(ct.Resources[i].Name)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range acr.Resources {
+		acr.Resources[i] = new(AlterConfigsResourceResponse)
+
+		errCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorCode = errCode
+
+		e, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorMsg = e
+
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Type = ConfigResourceType(t)
+
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Name = name
+	}
+
+	return nil
+}
+
+func (r *AlterConfigsResponse) key() int16 {
+	return 33
+}
+
+func (r *AlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 0000000000..ab65f01ccf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ApiVersionsRequest struct {
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
+	return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return nil
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+	return 18
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+	return 0
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 0000000000..23bc326e15
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+type ApiVersionsResponseBlock struct {
+	ApiKey     int16
+	MinVersion int16
+	MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+	pe.putInt16(b.ApiKey)
+	pe.putInt16(b.MinVersion)
+	pe.putInt16(b.MaxVersion)
+	return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+	var err error
+
+	if b.ApiKey, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MinVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MaxVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type ApiVersionsResponse struct {
+	Err         KError
+	ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+		return err
+	}
+	for _, apiVersion := range r.ApiVersions {
+		if err := apiVersion.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	numBlocks, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+	for i := 0; i < numBlocks; i++ {
+		block := new(ApiVersionsResponseBlock)
+		if err := block.decode(pd); err != nil {
+			return err
+		}
+		r.ApiVersions[i] = block
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+	return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+	return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 0000000000..1eff81cbf6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,921 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
+	// when both the Errors and Successes channels have been closed. When calling
+	// AsyncClose, you *must* continue to read from those channels in order to
+	// drain the results of any messages in flight.
+	AsyncClose()
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+
+	// Input is the input channel for the user to write messages to that they
+	// wish to send.
+	Input() chan<- *ProducerMessage
+
+	// Successes is the success output channel back to the user when Return.Successes is
+	// enabled. If Return.Successes is true, you MUST read from this channel or the
+	// Producer will deadlock. It is suggested that you send and read messages
+	// together in a single select statement.
+	Successes() <-chan *ProducerMessage
+
+	// Errors is the error output channel back to the user. You MUST read from this
+	// channel or the Producer will deadlock when the channel is full. Alternatively,
+	// you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
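+	// (When Return.Errors is disabled in that way, failed deliveries are only
+	// written to the package-level Logger.)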
+	Errors() <-chan *ProducerError
+}
+
+type asyncProducer struct {
+	client    Client
+	conf      *Config
+	ownClient bool
+
+	errors                    chan *ProducerError
+	input, successes, retries chan *ProducerMessage
+	inFlight                  sync.WaitGroup
+
+	brokers    map[*Broker]chan<- *ProducerMessage
+	brokerRefs map[chan<- *ProducerMessage]int
+	brokerLock sync.Mutex
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	p.(*asyncProducer).ownClient = true
+	return p, nil
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	p := &asyncProducer{
+		client:     client,
+		conf:       client.Config(),
+		errors:     make(chan *ProducerError),
+		input:      make(chan *ProducerMessage),
+		successes:  make(chan *ProducerMessage),
+		retries:    make(chan *ProducerMessage),
+		brokers:    make(map[*Broker]chan<- *ProducerMessage),
+		brokerRefs: make(map[chan<- *ProducerMessage]int),
+	}
+
+	// launch our singleton dispatchers
+	go withRecover(p.dispatcher)
+	go withRecover(p.retryHandler)
+
+	return p, nil
+}
+
+type flagSet int8
+
+const (
+	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+	fin                          // final message from partitionProducer to brokerProducer and back
+	shutdown                     // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+	Topic string // The Kafka topic for this message.
+	// The partitioning key for this message. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Key Encoder
+	// The actual message to store in Kafka. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Value Encoder
+
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
+	// This field is used to hold arbitrary data you wish to include so it
+	// will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only for pass-through data.
+	Metadata interface{}
+
+	// Below this point are filled in by the producer as the message is processed
+
+	// Offset is the offset of the message stored on the broker. This is only
+	// guaranteed to be defined if the message was successfully delivered and
+	// RequiredAcks is not NoResponse.
+	Offset int64
+	// Partition is the partition that the message was sent to. This is only
+	// guaranteed to be defined if the message was successfully delivered.
+	Partition int32
+	// Timestamp is the timestamp assigned to the message by the broker. This
+	// is only guaranteed to be defined if the message was successfully
+	// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
+	// least version 0.10.0.
+	Timestamp time.Time
+
+	retries int
+	flags   flagSet
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
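+
+// exampleProduce is an illustrative sketch added alongside this vendored
+// file, not upstream sarama code: it demonstrates the send/drain pattern the
+// AsyncProducer docs above call for, pushing a message into Input() while
+// draining Successes() and Errors() in the same select loop. The topic name
+// is a placeholder.
+func exampleProduce(p AsyncProducer, value string) {
+	msg := &ProducerMessage{Topic: "example-topic", Value: StringEncoder(value)}
+	for {
+		select {
+		case p.Input() <- msg:
+			return
+		case err := <-p.Errors():
+			Logger.Println("produce error:", err)
+		case <-p.Successes():
+			// Only delivered when Producer.Return.Successes is enabled.
+		}
+	}
+}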
+ +func (m *ProducerMessage) byteSize(version int) int { + var size int + if version >= 2 { + size = maximumRecordOverhead + for _, h := range m.Headers { + size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 + } + } else { + size = producerMessageOverhead + } + if m.Key != nil { + size += m.Key.Length() + } + if m.Value != nil { + size += m.Value.Length() + } + return size +} + +func (m *ProducerMessage) clear() { + m.flags = 0 + m.retries = 0 +} + +// ProducerError is the type of error generated when the producer fails to deliver a message. +// It contains the original ProducerMessage as well as the actual error value. +type ProducerError struct { + Msg *ProducerMessage + Err error +} + +func (pe ProducerError) Error() string { + return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) +} + +// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. +// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel +// when closing a producer. +type ProducerErrors []*ProducerError + +func (pe ProducerErrors) Error() string { + return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) +} + +func (p *asyncProducer) Errors() <-chan *ProducerError { + return p.errors +} + +func (p *asyncProducer) Successes() <-chan *ProducerMessage { + return p.successes +} + +func (p *asyncProducer) Input() chan<- *ProducerMessage { + return p.input +} + +func (p *asyncProducer) Close() error { + p.AsyncClose() + + if p.conf.Producer.Return.Successes { + go withRecover(func() { + for range p.successes { + } + }) + } + + var errors ProducerErrors + if p.conf.Producer.Return.Errors { + for event := range p.errors { + errors = append(errors, event) + } + } else { + <-p.errors + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (p *asyncProducer) AsyncClose() { + go withRecover(p.shutdown) +} + +// singleton +// dispatches messages by topic +func (p *asyncProducer) dispatcher() { + handlers := make(map[string]chan<- *ProducerMessage) + shuttingDown := false + + for msg := range p.input { + if msg == nil { + Logger.Println("Something tried to send a nil message, it was ignored.") + continue + } + + if msg.flags&shutdown != 0 { + shuttingDown = true + p.inFlight.Done() + continue + } else if msg.retries == 0 { + if shuttingDown { + // we can't just call returnError here because that decrements the wait group, + // which hasn't been incremented yet for this message, and shouldn't be + pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + continue + } + p.inFlight.Add(1) + } + + version := 1 + if p.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, ErrMessageSizeTooLarge) + continue + } + + handler := handlers[msg.Topic] + if handler == nil { + handler = p.newTopicProducer(msg.Topic) + handlers[msg.Topic] = handler + } + + handler <- msg + } + + for _, handler := range handlers { + close(handler) + } +} + +// one per topic +// partitions messages, then dispatches them by partition +type topicProducer struct { + parent *asyncProducer + topic string + input <-chan *ProducerMessage + + breaker *breaker.Breaker + handlers map[int32]chan<- *ProducerMessage + partitioner Partitioner +} + +func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { + input := 
make(chan *ProducerMessage, p.conf.ChannelBufferSize) + tp := &topicProducer{ + parent: p, + topic: topic, + input: input, + breaker: breaker.New(3, 1, 10*time.Second), + handlers: make(map[int32]chan<- *ProducerMessage), + partitioner: p.conf.Producer.Partitioner(topic), + } + go withRecover(tp.dispatch) + return input +} + +func (tp *topicProducer) dispatch() { + for msg := range tp.input { + if msg.retries == 0 { + if err := tp.partitionMessage(msg); err != nil { + tp.parent.returnError(msg, err) + continue + } + } + + handler := tp.handlers[msg.Partition] + if handler == nil { + handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) + tp.handlers[msg.Partition] = handler + } + + handler <- msg + } + + for _, handler := range tp.handlers { + close(handler) + } +} + +func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { + var partitions []int32 + + err := tp.breaker.Run(func() (err error) { + if tp.partitioner.RequiresConsistency() { + partitions, err = tp.parent.client.Partitions(msg.Topic) + } else { + partitions, err = tp.parent.client.WritablePartitions(msg.Topic) + } + return + }) + + if err != nil { + return err + } + + numPartitions := int32(len(partitions)) + + if numPartitions == 0 { + return ErrLeaderNotAvailable + } + + choice, err := tp.partitioner.Partition(msg, numPartitions) + + if err != nil { + return err + } else if choice < 0 || choice >= numPartitions { + return ErrInvalidPartition + } + + msg.Partition = partitions[choice] + + return nil +} + +// one per partition per topic +// dispatches messages to the appropriate broker +// also responsible for maintaining message order during retries +type partitionProducer struct { + parent *asyncProducer + topic string + partition int32 + input <-chan *ProducerMessage + + leader *Broker + breaker *breaker.Breaker + output chan<- *ProducerMessage + + // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, + // all other messages get buffered in retryState[msg.retries].buf to preserve ordering + // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and + // therefore whether our buffer is complete and safe to flush) + highWatermark int + retryState []partitionRetryState +} + +type partitionRetryState struct { + buf []*ProducerMessage + expectChaser bool +} + +func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + pp := &partitionProducer{ + parent: p, + topic: topic, + partition: partition, + input: input, + + breaker: breaker.New(3, 1, 10*time.Second), + retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), + } + go withRecover(pp.dispatch) + return input +} + +func (pp *partitionProducer) dispatch() { + // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` + // on the first message + pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) + if pp.leader != nil { + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + } + + for msg := range pp.input { + if msg.retries > pp.highWatermark { + // a new, higher, retry level; handle it and then back off + pp.newHighWatermark(msg.retries) + 
time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + } else if pp.highWatermark > 0 { + // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level + if msg.retries < pp.highWatermark { + // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) + if msg.flags&fin == fin { + pp.retryState[msg.retries].expectChaser = false + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + } else { + pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) + } + continue + } else if msg.flags&fin == fin { + // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, + // meaning this retry level is done and we can go down (at least) one level and flush that + pp.retryState[pp.highWatermark].expectChaser = false + pp.flushRetryBuffers() + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + continue + } + } + + // if we made it this far then the current msg contains real data, and can be sent to the next goroutine + // without breaking any of our ordering guarantees + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + continue + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + pp.output <- msg + } + + if pp.output != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + } +} + +func (pp *partitionProducer) newHighWatermark(hwm int) { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) + pp.highWatermark = hwm + + // send off a fin so that we know when everything "in between" has made it + // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) + pp.retryState[pp.highWatermark].expectChaser = true + pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} + + // a new HWM means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + pp.output = nil +} + +func (pp *partitionProducer) flushRetryBuffers() { + Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) + for { + pp.highWatermark-- + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) + goto flushDone + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + for _, msg := range pp.retryState[pp.highWatermark].buf { + pp.output <- msg + } + + flushDone: + pp.retryState[pp.highWatermark].buf = nil + if pp.retryState[pp.highWatermark].expectChaser { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) + break + } else if pp.highWatermark == 0 { + Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) + break + } + } +} + +func (pp *partitionProducer) updateLeader() error { + return pp.breaker.Run(func() (err error) { + if err = 
pp.parent.client.RefreshMetadata(pp.topic); err != nil { + return err + } + + if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { + return err + } + + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + + return nil + }) +} + +// one per broker; also constructs an associated flusher +func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { + var ( + input = make(chan *ProducerMessage) + bridge = make(chan *produceSet) + responses = make(chan *brokerProducerResponse) + ) + + bp := &brokerProducer{ + parent: p, + broker: broker, + input: input, + output: bridge, + responses: responses, + buffer: newProduceSet(p), + currentRetries: make(map[string]map[int32]error), + } + go withRecover(bp.run) + + // minimal bridge to make the network response `select`able + go withRecover(func() { + for set := range bridge { + request := set.buildRequest() + + response, err := broker.Produce(request) + + responses <- &brokerProducerResponse{ + set: set, + err: err, + res: response, + } + } + close(responses) + }) + + return input +} + +type brokerProducerResponse struct { + set *produceSet + err error + res *ProduceResponse +} + +// groups messages together into appropriately-sized batches for sending to the broker +// handles state related to retries etc +type brokerProducer struct { + parent *asyncProducer + broker *Broker + + input <-chan *ProducerMessage + output chan<- *produceSet + responses <-chan *brokerProducerResponse + + buffer *produceSet + timer <-chan time.Time + timerFired bool + + closing error + currentRetries map[string]map[int32]error +} + +func (bp *brokerProducer) run() { + var output chan<- *produceSet + Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) + + for { + select { + case msg := <-bp.input: + if msg == nil { + bp.shutdown() + return + } + + if msg.flags&syn == syn { + Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + if bp.currentRetries[msg.Topic] == nil { + bp.currentRetries[msg.Topic] = make(map[int32]error) + } + bp.currentRetries[msg.Topic][msg.Partition] = nil + bp.parent.inFlight.Done() + continue + } + + if reason := bp.needsRetry(msg); reason != nil { + bp.parent.retryMessage(msg, reason) + + if bp.closing == nil && msg.flags&fin == fin { + // we were retrying this partition but we can start processing again + delete(bp.currentRetries[msg.Topic], msg.Partition) + Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + } + + continue + } + + if bp.buffer.wouldOverflow(msg) { + if err := bp.waitForSpace(msg); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } + + if err := bp.buffer.add(msg); err != nil { + bp.parent.returnError(msg, err) + continue + } + + if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { + bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) + } + case <-bp.timer: + bp.timerFired = true + case output <- bp.buffer: + bp.rollOver() + case response := <-bp.responses: + bp.handleResponse(response) + } + + if bp.timerFired || bp.buffer.readyToFlush() { + output = bp.output + } else { + output = nil + } + } +} + +func (bp *brokerProducer) shutdown() { + for !bp.buffer.empty() { + select { + case response := 
<-bp.responses: + bp.handleResponse(response) + case bp.output <- bp.buffer: + bp.rollOver() + } + } + close(bp.output) + for response := range bp.responses { + bp.handleResponse(response) + } + + Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) +} + +func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { + if bp.closing != nil { + return bp.closing + } + + return bp.currentRetries[msg.Topic][msg.Partition] +} + +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + + for { + select { + case response := <-bp.responses: + bp.handleResponse(response) + // handling a response can change our state, so re-check some things + if reason := bp.needsRetry(msg); reason != nil { + return reason + } else if !bp.buffer.wouldOverflow(msg) { + return nil + } + case bp.output <- bp.buffer: + bp.rollOver() + return nil + } + } +} + +func (bp *brokerProducer) rollOver() { + bp.timer = nil + bp.timerFired = false + bp.buffer = newProduceSet(bp.parent) +} + +func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { + if response.err != nil { + bp.handleError(response.set, response.err) + } else { + bp.handleSuccess(response.set, response.res) + } + + if bp.buffer.empty() { + bp.rollOver() // this can happen if the response invalidated our buffer + } +} + +func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { + // we iterate through the blocks in the request set, not the response, so that we notice + // if the response is missing a block completely + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + if response == nil { + // this only happens when RequiredAcks is NoResponse, so we have to assume success + bp.parent.returnSuccesses(msgs) + return + } + + block := response.GetBlock(topic, partition) + if block == nil { + bp.parent.returnErrors(msgs, ErrIncompleteResponse) + return + } + + switch block.Err { + // Success + case ErrNoError: + if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { + for _, msg := range msgs { + msg.Timestamp = block.Timestamp + } + } + for i, msg := range msgs { + msg.Offset = block.Offset + int64(i) + } + bp.parent.returnSuccesses(msgs) + // Retriable errors + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", + bp.broker.ID(), topic, partition, block.Err) + bp.currentRetries[topic][partition] = block.Err + bp.parent.retryMessages(msgs, block.Err) + bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + // Other non-retriable errors + default: + bp.parent.returnErrors(msgs, block.Err) + } + }) +} + +func (bp *brokerProducer) handleError(sent *produceSet, err error) { + switch err.(type) { + case PacketEncodingError: + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.returnErrors(msgs, err) + }) + default: + Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) + bp.parent.abandonBrokerConnection(bp.broker) + _ = bp.broker.Close() + bp.closing = err + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.buffer.eachPartition(func(topic 
string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.rollOver() + } +} + +// singleton +// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock +// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel +func (p *asyncProducer) retryHandler() { + var msg *ProducerMessage + buf := queue.New() + + for { + if buf.Length() == 0 { + msg = <-p.retries + } else { + select { + case msg = <-p.retries: + case p.input <- buf.Peek().(*ProducerMessage): + buf.Remove() + continue + } + } + + if msg == nil { + return + } + + buf.Add(msg) + } +} + +// utility functions + +func (p *asyncProducer) shutdown() { + Logger.Println("Producer shutting down.") + p.inFlight.Add(1) + p.input <- &ProducerMessage{flags: shutdown} + + p.inFlight.Wait() + + if p.ownClient { + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) + } + } + + close(p.input) + close(p.retries) + close(p.errors) + close(p.successes) +} + +func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + msg.clear() + pErr := &ProducerError{Msg: msg, Err: err} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + p.inFlight.Done() +} + +func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.returnError(msg, err) + } +} + +func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { + for _, msg := range batch { + if p.conf.Producer.Return.Successes { + msg.clear() + p.successes <- msg + } + p.inFlight.Done() + } +} + +func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { + if msg.retries >= p.conf.Producer.Retry.Max { + p.returnError(msg, err) + } else { + msg.retries++ + p.retries <- msg + } +} + +func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.retryMessage(msg, err) + } +} + +func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + bp := p.brokers[broker] + + if bp == nil { + bp = p.newBrokerProducer(broker) + p.brokers[broker] = bp + p.brokerRefs[bp] = 0 + } + + p.brokerRefs[bp]++ + + return bp +} + +func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + p.brokerRefs[bp]-- + if p.brokerRefs[bp] == 0 { + close(bp) + delete(p.brokerRefs, bp) + + if p.brokers[broker] == bp { + delete(p.brokers, broker) + } + } +} + +func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + delete(p.brokers, broker) +} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go new file mode 100644 index 0000000000..d836bee6d8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -0,0 +1,883 @@ +package sarama + +import ( + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/rcrowley/go-metrics" +) + +// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. 
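+// A Broker is usually obtained from a Client's cached metadata; constructing
+// one by hand with NewBroker and Open (both below) is mainly needed for
+// bootstrap and administrative requests.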
+type Broker struct { + id int32 + addr string + rack *string + + conf *Config + correlationID int32 + conn net.Conn + connErr error + lock sync.Mutex + opened int32 + + responses chan responsePromise + done chan bool + + incomingByteRate metrics.Meter + requestRate metrics.Meter + requestSize metrics.Histogram + requestLatency metrics.Histogram + outgoingByteRate metrics.Meter + responseRate metrics.Meter + responseSize metrics.Histogram + brokerIncomingByteRate metrics.Meter + brokerRequestRate metrics.Meter + brokerRequestSize metrics.Histogram + brokerRequestLatency metrics.Histogram + brokerOutgoingByteRate metrics.Meter + brokerResponseRate metrics.Meter + brokerResponseSize metrics.Histogram +} + +type responsePromise struct { + requestTime time.Time + correlationID int32 + packets chan []byte + errors chan error +} + +// NewBroker creates and returns a Broker targeting the given host:port address. +// This does not attempt to actually connect, you have to call Open() for that. +func NewBroker(addr string) *Broker { + return &Broker{id: -1, addr: addr} +} + +// Open tries to connect to the Broker if it is not already connected or connecting, but does not block +// waiting for the connection to complete. This means that any subsequent operations on the broker will +// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, +// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or +// AlreadyConnected. If conf is nil, the result of NewConfig() is used. +func (b *Broker) Open(conf *Config) error { + if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { + return ErrAlreadyConnected + } + + if conf == nil { + conf = NewConfig() + } + + err := conf.Validate() + if err != nil { + return err + } + + b.lock.Lock() + + go withRecover(func() { + defer b.lock.Unlock() + + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + } + + if conf.Net.TLS.Enable { + b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) + } else { + b.conn, b.connErr = dialer.Dial("tcp", b.addr) + } + if b.connErr != nil { + Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + b.conn = newBufConn(b.conn) + + b.conf = conf + + // Create or reuse the global metrics shared between brokers + b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) + b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) + b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) + b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) + b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) + b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + // Do not gather metrics for seeded broker (only used during bootstrap) because they share + // the same id (-1) and are already exposed through the global metrics above + if b.id >= 0 { + b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) + b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) + b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) + b.brokerRequestLatency = 
getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) + b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) + b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) + b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) + } + + if conf.Net.SASL.Enable { + b.connErr = b.sendAndReceiveSASLPlainAuth() + if b.connErr != nil { + err = b.conn.Close() + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } + + b.done = make(chan bool) + b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + + if b.id >= 0 { + Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + } else { + Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + } + go withRecover(b.responseReceiver) + }) + + return nil +} + +// Connected returns true if the broker is connected and false otherwise. If the broker is not +// connected but it had tried to connect, the error from that connection attempt is also returned. +func (b *Broker) Connected() (bool, error) { + b.lock.Lock() + defer b.lock.Unlock() + + return b.conn != nil, b.connErr +} + +func (b *Broker) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return ErrNotConnected + } + + close(b.responses) + <-b.done + + err := b.conn.Close() + + b.conn = nil + b.connErr = nil + b.done = nil + b.responses = nil + + if b.id >= 0 { + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b)) + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b)) + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b)) + b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b)) + } + + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + + atomic.StoreInt32(&b.opened, 0) + + return err +} + +// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. +func (b *Broker) ID() int32 { + return b.id +} + +// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. 
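+//
+// A minimal, illustrative sketch (not upstream sarama documentation) of where
+// that address comes from when a broker is constructed by hand:
+//
+//	b := NewBroker("localhost:9092")
+//	_ = b.Open(nil) // nil falls back to NewConfig() defaults
+//	if ok, _ := b.Connected(); ok {
+//		Logger.Println(b.Addr()) // "localhost:9092"
+//	}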
+func (b *Broker) Addr() string { + return b.addr +} + +func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { + response := new(MetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { + response := new(ConsumerMetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) { + response := new(FindCoordinatorResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { + response := new(OffsetResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { + var response *ProduceResponse + var err error + + if request.RequiredAcks == NoResponse { + err = b.sendAndReceive(request, nil) + } else { + response = new(ProduceResponse) + err = b.sendAndReceive(request, response) + } + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { + response := new(FetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { + response := new(OffsetCommitResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { + response := new(OffsetFetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { + response := new(JoinGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { + response := new(SyncGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { + response := new(LeaveGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { + response := new(HeartbeatResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { + response := new(ListGroupsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { + response := new(DescribeGroupsResponse) + + err := 
b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { + response := new(ApiVersionsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) { + response := new(CreatePartitionsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { + response := new(CreateTopicsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { + response := new(DeleteTopicsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { + response := new(DeleteRecordsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) { + response := new(DescribeAclsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) { + response := new(CreateAclsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) { + response := new(DeleteAclsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { + response := new(InitProducerIDResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) { + response := new(AddPartitionsToTxnResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) { + response := new(AddOffsetsToTxnResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { + response := new(EndTxnResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) { + response := new(TxnOffsetCommitResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { + response := new(DescribeConfigsResponse) + + err 
:= b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) { + response := new(AlterConfigsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) { + response := new(DeleteGroupsResponse) + + if err := b.sendAndReceive(request, response); err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + if b.connErr != nil { + return nil, b.connErr + } + return nil, ErrNotConnected + } + + if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { + return nil, ErrUnsupportedVersion + } + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return nil, err + } + + err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + return nil, err + } + + requestTime := time.Now() + bytes, err := b.conn.Write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + return nil, err + } + b.correlationID++ + + if !promiseResponse { + // Record request latency without the response + b.updateRequestLatencyMetrics(time.Since(requestTime)) + return nil, nil + } + + promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} + b.responses <- promise + + return &promise, nil +} + +func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { + promise, err := b.send(req, res != nil) + + if err != nil { + return err + } + + if promise == nil { + return nil + } + + select { + case buf := <-promise.packets: + return versionedDecode(buf, res, req.version()) + case err = <-promise.errors: + return err + } +} + +func (b *Broker) decode(pd packetDecoder, version int16) (err error) { + b.id, err = pd.getInt32() + if err != nil { + return err + } + + host, err := pd.getString() + if err != nil { + return err + } + + port, err := pd.getInt32() + if err != nil { + return err + } + + if version >= 1 { + b.rack, err = pd.getNullableString() + if err != nil { + return err + } + } + + b.addr = net.JoinHostPort(host, fmt.Sprint(port)) + if _, _, err := net.SplitHostPort(b.addr); err != nil { + return err + } + + return nil +} + +func (b *Broker) encode(pe packetEncoder, version int16) (err error) { + + host, portstr, err := net.SplitHostPort(b.addr) + if err != nil { + return err + } + port, err := strconv.Atoi(portstr) + if err != nil { + return err + } + + pe.putInt32(b.id) + + err = pe.putString(host) + if err != nil { + return err + } + + pe.putInt32(int32(port)) + + if version >= 1 { + err = pe.putNullableString(b.rack) + if err != nil { + return err + } + } + + return nil +} + +func (b *Broker) responseReceiver() { + var dead error + header := make([]byte, 8) + for response := range b.responses { + if dead != nil { + response.errors <- dead + continue + } + + err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) + if err != nil { + dead = err + response.errors <- err + continue + } + + bytesReadHeader, err := io.ReadFull(b.conn, header) + requestLatency := time.Since(response.requestTime) + if err != nil { + 
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + + decodedHeader := responseHeader{} + err = decode(header, &decodedHeader) + if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + if decodedHeader.correlationID != response.correlationID { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + // TODO if decoded ID < cur ID, discard until we catch up + // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response + dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} + response.errors <- dead + continue + } + + buf := make([]byte, decodedHeader.length-4) + bytesReadBody, err := io.ReadFull(b.conn, buf) + b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) + if err != nil { + dead = err + response.errors <- err + continue + } + + response.packets <- buf + } + close(b.done) +} + +func (b *Broker) sendAndReceiveSASLPlainHandshake() error { + rb := &SaslHandshakeRequest{"PLAIN"} + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return err + } + + err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + return err + } + + requestTime := time.Now() + bytes, err := b.conn.Write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) + return err + } + b.correlationID++ + //wait for the response + header := make([]byte, 8) // response header + _, err = io.ReadFull(b.conn, header) + if err != nil { + Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) + return err + } + length := binary.BigEndian.Uint32(header[:4]) + payload := make([]byte, length-4) + n, err := io.ReadFull(b.conn, payload) + if err != nil { + Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) + return err + } + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) + res := &SaslHandshakeResponse{} + err = versionedDecode(payload, res, 0) + if err != nil { + Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) + return err + } + if res.Err != ErrNoError { + Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) + return res.Err + } + Logger.Print("Successful SASL handshake") + return nil +} + +// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) +// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 +// +// In SASL Plain, Kafka expects the auth header to be in the following format +// Message format (from https://tools.ietf.org/html/rfc4616): +// +// message = [authzid] UTF8NUL authcid UTF8NUL passwd +// authcid = 1*SAFE ; MUST accept up to 255 octets +// authzid = 1*SAFE ; MUST accept up to 255 octets +// passwd = 1*SAFE ; MUST accept up to 255 octets +// UTF8NUL = %x00 ; UTF-8 encoded NUL character +// +// SAFE = UTF1 / UTF2 / UTF3 / UTF4 +// ;; any UTF-8 encoded Unicode character except NUL +// +// When credentials are valid, Kafka returns a 4 byte array of null characters. +// When credentials are invalid, Kafka closes the connection. 
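+//
+// For illustration (an added example, not upstream text): with user "alice",
+// password "secret", and an empty authzid, the client writes a 4-byte
+// big-endian length header followed by the NUL-delimited fields, 13 payload
+// bytes in total:
+//
+//	0x00 0x00 0x00 0x0d                length header (13)
+//	0x00 a l i c e 0x00 s e c r e t    [authzid] NUL authcid NUL passwd
+//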
This does not seem to be the ideal way
+// of responding to bad credentials but that's how it's being done today.
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+	if b.conf.Net.SASL.Handshake {
+		handshakeErr := b.sendAndReceiveSASLPlainHandshake()
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+			return handshakeErr
+		}
+	}
+	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) // 4-byte length header + auth data
+	binary.BigEndian.PutUint32(authBytes, uint32(length))
+	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+	err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	requestTime := time.Now()
+	bytesWritten, err := b.conn.Write(authBytes)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+	if err != nil {
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	header := make([]byte, 4)
+	n, err := io.ReadFull(b.conn, header)
+	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+	// If the credentials are valid, we would get a 4 byte response filled with null characters.
+	// Otherwise, the broker closes the connection and we get an EOF
+	if err != nil {
+		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+	return nil
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+	b.updateRequestLatencyMetrics(requestLatency)
+	b.responseRate.Mark(1)
+	if b.brokerResponseRate != nil {
+		b.brokerResponseRate.Mark(1)
+	}
+	responseSize := int64(bytes)
+	b.incomingByteRate.Mark(responseSize)
+	if b.brokerIncomingByteRate != nil {
+		b.brokerIncomingByteRate.Mark(responseSize)
+	}
+	b.responseSize.Update(responseSize)
+	if b.brokerResponseSize != nil {
+		b.brokerResponseSize.Update(responseSize)
+	}
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+	requestLatencyInMs := int64(requestLatency / time.Millisecond)
+	b.requestLatency.Update(requestLatencyInMs)
+	if b.brokerRequestLatency != nil {
+		b.brokerRequestLatency.Update(requestLatencyInMs)
+	}
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+	b.requestRate.Mark(1)
+	if b.brokerRequestRate != nil {
+		b.brokerRequestRate.Mark(1)
+	}
+	requestSize := int64(bytes)
+	b.outgoingByteRate.Mark(requestSize)
+	if b.brokerOutgoingByteRate != nil {
+		b.brokerOutgoingByteRate.Mark(requestSize)
+	}
+	b.requestSize.Update(requestSize)
+	if b.brokerRequestSize != nil {
+		b.brokerRequestSize.Update(requestSize)
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 0000000000..019cb43735
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,846 @@
+package sarama
+
+import (
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected
+// automatically when it passes out of scope.
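+//
+// A minimal lifecycle sketch (illustrative only, assuming a broker at
+// "localhost:9092"):
+//
+//	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Close() // required; clients are not garbage-collected
+//	topics, err := client.Topics()
+//	fmt.Println(topics, err)
+//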
It is safe to share a client amongst many +// users, however Kafka will process requests from a single client strictly in serial, +// so it is generally more efficient to use the default one client per producer/consumer. +type Client interface { + // Config returns the Config struct of the client. This struct should not be + // altered after it has been created. + Config() *Config + + // Controller returns the cluster controller broker. + Controller() (*Broker, error) + + // Brokers returns the current set of active brokers as retrieved from cluster metadata. + Brokers() []*Broker + + // Topics returns the set of available topics as retrieved from cluster metadata. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + Partitions(topic string) ([]int32, error) + + // WritablePartitions returns the sorted list of all writable partition IDs for + // the given topic, where "writable" means "having a valid leader accepting + // writes". + WritablePartitions(topic string) ([]int32, error) + + // Leader returns the broker object that is the leader of the current + // topic/partition, as determined by querying the cluster metadata. + Leader(topic string, partitionID int32) (*Broker, error) + + // Replicas returns the set of all replica IDs for the given partition. + Replicas(topic string, partitionID int32) ([]int32, error) + + // InSyncReplicas returns the set of all in-sync replica IDs for the given + // partition. In-sync replicas are replicas which are fully caught up with + // the partition leader. + InSyncReplicas(topic string, partitionID int32) ([]int32, error) + + // RefreshMetadata takes a list of topics and queries the cluster to refresh the + // available metadata for those topics. If no topics are provided, it will refresh + // metadata for all topics. + RefreshMetadata(topics ...string) error + + // GetOffset queries the cluster to get the most recent available offset at the + // given time (in milliseconds) on the topic/partition combination. + // Time should be OffsetOldest for the earliest available offset, + // OffsetNewest for the offset of the message that will be produced next, or a time. + GetOffset(topic string, partitionID int32, time int64) (int64, error) + + // Coordinator returns the coordinating broker for a consumer group. It will + // return a locally cached value if it's available. You can call + // RefreshCoordinator to update the cached value. This function only works on + // Kafka 0.8.2 and higher. + Coordinator(consumerGroup string) (*Broker, error) + + // RefreshCoordinator retrieves the coordinator for a consumer group and stores it + // in local cache. This function only works on Kafka 0.8.2 and higher. + RefreshCoordinator(consumerGroup string) error + + // Close shuts down all broker connections managed by this client. It is required + // to call this function before a client object passes out of scope, as it will + // otherwise leak memory. You must close any Producers or Consumers using a client + // before you close the client. + Close() error + + // Closed returns true if the client has already had Close called on it + Closed() bool +} + +const ( + // OffsetNewest stands for the log head offset, i.e. the offset that will be + // assigned to the next message that will be produced to the partition. You + // can send this to a client's GetOffset method to get this offset, or when + // calling ConsumePartition to start consuming new messages. 
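	//
	// For example (an illustrative sketch; "my-topic" is hypothetical), the
	// current offset range of a partition can be probed with GetOffset:
	//
	//	oldest, _ := client.GetOffset("my-topic", 0, sarama.OffsetOldest)
	//	newest, _ := client.GetOffset("my-topic", 0, sarama.OffsetNewest)
	//	fmt.Println(newest - oldest) // approximate number of stored messages
	//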
+ OffsetNewest int64 = -1 + // OffsetOldest stands for the oldest offset available on the broker for a + // partition. You can send this to a client's GetOffset method to get this + // offset, or when calling ConsumePartition to start consuming from the + // oldest offset that is still available on the broker. + OffsetOldest int64 = -2 +) + +type client struct { + conf *Config + closer, closed chan none // for shutting down background metadata updater + + // the broker addresses given to us through the constructor are not guaranteed to be returned in + // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) + // so we store them separately + seedBrokers []*Broker + deadSeeds []*Broker + + controllerID int32 // cluster controller broker id + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + + // If the number of partitions is large, we can get some churn calling cachedPartitions, + // so the result is cached. It is important to update this value whenever metadata is changed + cachedPartitionsResults map[string][maxPartitionIndex][]int32 + + lock sync.RWMutex // protects access to the maps that hold cluster state. +} + +// NewClient creates a new Client. It connects to one of the given broker addresses +// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot +// be retrieved from any of the given broker addresses, the client is not created. +func NewClient(addrs []string, conf *Config) (Client, error) { + Logger.Println("Initializing new client") + + if conf == nil { + conf = NewConfig() + } + + if err := conf.Validate(); err != nil { + return nil, err + } + + if len(addrs) < 1 { + return nil, ConfigurationError("You must provide at least one broker address") + } + + client := &client{ + conf: conf, + closer: make(chan none), + closed: make(chan none), + brokers: make(map[int32]*Broker), + metadata: make(map[string]map[int32]*PartitionMetadata), + cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), + coordinators: make(map[string]int32), + } + + random := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, index := range random.Perm(len(addrs)) { + client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) + } + + if conf.Metadata.Full { + // do an initial fetch of all cluster metadata by specifying an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } + } + go withRecover(client.backgroundMetadataUpdater) + + Logger.Println("Successfully initialized new client") + + return client, nil +} + +func (client *client) Config() *Config { + return client.conf +} + +func (client *client) Brokers() []*Broker { + client.lock.RLock() + defer client.lock.RUnlock() + brokers := make([]*Broker, 0) + for _, broker := range client.brokers { + brokers = append(brokers, broker) + } + return brokers +} + +func (client *client) Close() error { + if client.Closed() { + // 
Chances are this is being called from a defer() and the error will go unobserved + // so we go ahead and log the event in this case. + Logger.Printf("Close() called on already closed client") + return ErrClosedClient + } + + // shutdown and wait for the background thread before we take the lock, to avoid races + close(client.closer) + <-client.closed + + client.lock.Lock() + defer client.lock.Unlock() + Logger.Println("Closing Client") + + for _, broker := range client.brokers { + safeAsyncClose(broker) + } + + for _, broker := range client.seedBrokers { + safeAsyncClose(broker) + } + + client.brokers = nil + client.metadata = nil + + return nil +} + +func (client *client) Closed() bool { + return client.brokers == nil +} + +func (client *client) Topics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadata)) + for topic := range client.metadata { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) Partitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, allPartitions) + + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, allPartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) WritablePartitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, writablePartitions) + + // len==0 catches when it's nil (no such topic) and the odd case when every single + // partition is undergoing leader election simultaneously. Callers have to be able to handle + // this function returning an empty slice (which is a valid return value) but catching it + // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers + // a metadata refresh as a nicety so callers can just try again and don't have to manually + // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
+ if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, writablePartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.Replicas), metadata.Err + } + return dupInt32Slice(metadata.Replicas), nil +} + +func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.Isr), metadata.Err + } + return dupInt32Slice(metadata.Isr), nil +} + +func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + leader, err := client.cachedLeader(topic, partitionID) + + if leader == nil { + err = client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + leader, err = client.cachedLeader(topic, partitionID) + } + + return leader, err +} + +func (client *client) RefreshMetadata(topics ...string) error { + if client.Closed() { + return ErrClosedClient + } + + // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper + // error. This handles the case by returning an error instead of sending it + // off to Kafka. 
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+	for _, topic := range topics {
+		if len(topic) == 0 {
+			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+		}
+	}
+
+	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+	if client.Closed() {
+		return -1, ErrClosedClient
+	}
+
+	offset, err := client.getOffset(topic, partitionID, time)
+
+	if err != nil {
+		if err := client.RefreshMetadata(topic); err != nil {
+			return -1, err
+		}
+		return client.getOffset(topic, partitionID, time)
+	}
+
+	return offset, err
+}
+
+func (client *client) Controller() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		if err := client.refreshMetadata(); err != nil {
+			return nil, err
+		}
+		controller = client.cachedController()
+	}
+
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedCoordinator(consumerGroup)
+
+	if coordinator == nil {
+		if err := client.RefreshCoordinator(consumerGroup); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedCoordinator(consumerGroup)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.coordinators[consumerGroup] = response.Coordinator.ID()
+	return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If a broker with the same ID is already registered under a different
+// address, the stale instance is closed asynchronously and replaced. You must hold the write
+// lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+	if client.brokers[broker.ID()] == nil {
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+		safeAsyncClose(client.brokers[broker.ID()])
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+	}
+}
+
+// deregisterBroker removes a broker from the seedBrokers list, and if it's
+// not a seed broker, removes it from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) { + client.lock.Lock() + defer client.lock.Unlock() + + if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { + client.deadSeeds = append(client.deadSeeds, broker) + client.seedBrokers = client.seedBrokers[1:] + } else { + // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, + // but we really shouldn't have to; once that loop is made better this case can be + // removed, and the function generally can be renamed from `deregisterBroker` to + // `nextSeedBroker` or something + Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + delete(client.brokers, broker.ID()) + } +} + +func (client *client) resurrectDeadBrokers() { + client.lock.Lock() + defer client.lock.Unlock() + + Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) + client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) + client.deadSeeds = nil +} + +func (client *client) any() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + + // not guaranteed to be random *or* deterministic + for _, broker := range client.brokers { + _ = broker.Open(client.conf) + return broker + } + + return nil +} + +// private caching/lazy metadata helpers + +type partitionType int + +const ( + allPartitions partitionType = iota + writablePartitions + // If you add any more types, update the partition cache in update() + + // Ensure this is the last partition type value + maxPartitionIndex +) + +func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + return partitions[partitionID] + } + + return nil +} + +func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions, exists := client.cachedPartitionsResults[topic] + + if !exists { + return nil + } + return partitions[partitionSet] +} + +func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { + partitions := client.metadata[topic] + + if partitions == nil { + return nil + } + + ret := make([]int32, 0, len(partitions)) + for _, partition := range partitions { + if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { + continue + } + ret = append(ret, partition.ID) + } + + sort.Sort(int32Slice(ret)) + return ret +} + +func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + metadata, ok := partitions[partitionID] + if ok { + if metadata.Err == ErrLeaderNotAvailable { + return nil, ErrLeaderNotAvailable + } + b := client.brokers[metadata.Leader] + if b == nil { + return nil, ErrLeaderNotAvailable + } + _ = b.Open(client.conf) + return b, nil + } + } + + return nil, ErrUnknownTopicOrPartition +} + +func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { + broker, err := client.Leader(topic, partitionID) + if err != nil { + return -1, err + } + + request := &OffsetRequest{} + if client.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 1 + } + request.AddBlock(topic, partitionID, time, 1) + + response, err 
:= broker.GetAvailableOffsets(request) + if err != nil { + _ = broker.Close() + return -1, err + } + + block := response.GetBlock(topic, partitionID) + if block == nil { + _ = broker.Close() + return -1, ErrIncompleteResponse + } + if block.Err != ErrNoError { + return -1, block.Err + } + if len(block.Offsets) != 1 { + return -1, ErrOffsetOutOfRange + } + + return block.Offsets[0], nil +} + +// core metadata update logic + +func (client *client) backgroundMetadataUpdater() { + defer close(client.closed) + + if client.conf.Metadata.RefreshFrequency == time.Duration(0) { + return + } + + ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := client.refreshMetadata(); err != nil { + Logger.Println("Client background metadata update:", err) + } + case <-client.closer: + return + } + } +} + +func (client *client) refreshMetadata() error { + topics := []string{} + + if !client.conf.Metadata.Full { + if specificTopics, err := client.Topics(); err != nil { + return err + } else if len(specificTopics) == 0 { + return ErrNoTopicsToUpdateMetadata + } else { + topics = specificTopics + } + } + + if err := client.RefreshMetadata(topics...); err != nil { + return err + } + + return nil +} + +func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { + retry := func(err error) error { + if attemptsRemaining > 0 { + Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + time.Sleep(client.conf.Metadata.Retry.Backoff) + return client.tryRefreshMetadata(topics, attemptsRemaining-1) + } + return err + } + + for broker := client.any(); broker != nil; broker = client.any() { + if len(topics) > 0 { + Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) + } else { + Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) + } + + req := &MetadataRequest{Topics: topics} + if client.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 1 + } + response, err := broker.GetMetadata(req) + + switch err.(type) { + case nil: + allKnownMetaData := len(topics) == 0 + // valid response, use it + shouldRetry, err := client.updateMetadata(response, allKnownMetaData) + if shouldRetry { + Logger.Println("client/metadata found some partitions to be leaderless") + return retry(err) // note: err can be nil + } + return err + + case PacketEncodingError: + // didn't even send, return the error + return err + default: + // some other error, remove that broker and try again + Logger.Println("client/metadata got error from broker while fetching metadata:", err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + + Logger.Println("client/metadata no available broker to send metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} + +// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable +func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) { + client.lock.Lock() + defer client.lock.Unlock() + + // For all the brokers we received: + // - if it is a new ID, save it + // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - otherwise ignore it, replacing our existing one would just bounce the connection + for _, broker := range data.Brokers { + 
client.registerBroker(broker) + } + + client.controllerID = data.ControllerID + + if allKnownMetaData { + client.metadata = make(map[string]map[int32]*PartitionMetadata) + client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32) + } + for _, topic := range data.Topics { + delete(client.metadata, topic.Name) + delete(client.cachedPartitionsResults, topic.Name) + + switch topic.Err { + case ErrNoError: + break + case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results + err = topic.Err + continue + case ErrUnknownTopicOrPartition: // retry, do not store partial partition results + err = topic.Err + retry = true + continue + case ErrLeaderNotAvailable: // retry, but store partial partition results + retry = true + break + default: // don't retry, don't store partial results + Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) + err = topic.Err + continue + } + + client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) + for _, partition := range topic.Partitions { + client.metadata[topic.Name][partition.ID] = partition + if partition.Err == ErrLeaderNotAvailable { + retry = true + } + } + + var partitionCache [maxPartitionIndex][]int32 + partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) + partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) + client.cachedPartitionsResults[topic.Name] = partitionCache + } + + return +} + +func (client *client) cachedCoordinator(consumerGroup string) *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + if coordinatorID, ok := client.coordinators[consumerGroup]; ok { + return client.brokers[coordinatorID] + } + return nil +} + +func (client *client) cachedController() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + return client.brokers[client.controllerID] +} + +func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) { + retry := func(err error) (*FindCoordinatorResponse, error) { + if attemptsRemaining > 0 { + Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + time.Sleep(client.conf.Metadata.Retry.Backoff) + return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) + } + return nil, err + } + + for broker := client.any(); broker != nil; broker = client.any() { + Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) + + request := new(FindCoordinatorRequest) + request.CoordinatorKey = consumerGroup + request.CoordinatorType = CoordinatorGroup + + response, err := broker.FindCoordinator(request) + + if err != nil { + Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) + + switch err.(type) { + case PacketEncodingError: + return nil, err + default: + _ = broker.Close() + client.deregisterBroker(broker) + continue + } + } + + switch response.Err { + case ErrNoError: + Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + return response, nil + + case ErrConsumerCoordinatorNotAvailable: + Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) + + // This is very ugly, but this scenario will only happen once per cluster. 
+			// The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		default:
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 0000000000..a564b5c23e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,458 @@
+package sarama
+
+import (
+	"compress/gzip"
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"regexp"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL based authentication with broker. While there are multiple SASL authentication methods
+		// the current implementation is limited to plaintext (SASL/PLAIN) authentication
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// Username and password for SASL/PLAIN authentication.
+			User     string
+			Password string
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection.
+		// If zero, keep-alives are disabled. (default is 0: disabled).
+		KeepAlive time.Duration
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
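		//
		// For example (an illustrative tuning, not a recommendation):
		//
		//	conf := sarama.NewConfig()
		//	conf.Metadata.RefreshFrequency = 5 * time.Minute
		//	conf.Metadata.Retry.Max = 5
		//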
+ RefreshFrequency time.Duration + + // Whether to maintain a full set of metadata for all topics, or just + // the minimal set that has been necessary so far. The full set is simpler + // and usually more convenient, but can take up a substantial amount of + // memory if you have many topics and partitions. Defaults to true. + Full bool + } + + // Producer is the namespace for configuration related to producing messages, + // used by the Producer. + Producer struct { + // The maximum permitted size of a message (defaults to 1000000). Should be + // set equal to or smaller than the broker's `message.max.bytes`. + MaxMessageBytes int + // The level of acknowledgement reliability needed from the broker (defaults + // to WaitForLocal). Equivalent to the `request.required.acks` setting of the + // JVM producer. + RequiredAcks RequiredAcks + // The maximum duration the broker will wait the receipt of the number of + // RequiredAcks (defaults to 10 seconds). This is only relevant when + // RequiredAcks is set to WaitForAll or a number > 1. Only supports + // millisecond resolution, nanoseconds will be truncated. Equivalent to + // the JVM producer's `request.timeout.ms` setting. + Timeout time.Duration + // The type of compression to use on messages (defaults to no compression). + // Similar to `compression.codec` setting of the JVM producer. + Compression CompressionCodec + // The level of compression to use on messages. The meaning depends + // on the actual compression type used and defaults to default compression + // level for the codec. + CompressionLevel int + // Generates partitioners for choosing the partition to send messages to + // (defaults to hashing the message key). Similar to the `partitioner.class` + // setting for the JVM producer. + Partitioner PartitionerConstructor + + // Return specifies what channels will be populated. If they are set to true, + // you must read from the respective channels to prevent deadlock. If, + // however, this config is used to create a `SyncProducer`, both must be set + // to true and you shall not read from the channels since the producer does + // this internally. + Return struct { + // If enabled, successfully delivered messages will be returned on the + // Successes channel (default disabled). + Successes bool + + // If enabled, messages that failed to deliver will be returned on the + // Errors channel, including error (default enabled). + Errors bool + } + + // The following config options control how often messages are batched up and + // sent to the broker. By default, messages are sent as fast as possible, and + // all messages received while the current batch is in-flight are placed + // into the subsequent batch. + Flush struct { + // The best-effort number of bytes needed to trigger a flush. Use the + // global sarama.MaxRequestSize to set a hard upper limit. + Bytes int + // The best-effort number of messages needed to trigger a flush. Use + // `MaxMessages` to set a hard upper limit. + Messages int + // The best-effort frequency of flushes. Equivalent to + // `queue.buffering.max.ms` setting of JVM producer. + Frequency time.Duration + // The maximum number of messages the producer will send in a single + // broker request. Defaults to 0 for unlimited. Similar to + // `queue.buffering.max.messages` in the JVM producer. + MaxMessages int + } + + Retry struct { + // The total number of times to retry sending a message (default 3). + // Similar to the `message.send.max.retries` setting of the JVM producer. 
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+		}
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	//
+	// Note that Sarama's Consumer type does not currently support automatic
+	// consumer-group rebalancing and offset tracking. For Zookeeper-based
+	// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
+	// library builds on Sarama to add this support. For Kafka-based tracking
+	// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
+	// builds on Sarama to add this support.
+	Consumer struct {
+		Retry struct {
+			// How long to wait after failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 1MB). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyways. The
+		// default is 250ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.wait.max.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message takes to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`.
For + // example, if `MaxProcessingTime` is 100ms then a delay of 180ms + // between two messages being sent may not be recognized as a timeout. + MaxProcessingTime time.Duration + + // Return specifies what channels will be populated. If they are set to true, + // you must read from them to prevent deadlock. + Return struct { + // If enabled, any errors that occurred while consuming are returned on + // the Errors channel (default disabled). + Errors bool + } + + // Offsets specifies configuration for how and when to commit consumed + // offsets. This currently requires the manual use of an OffsetManager + // but will eventually be automated. + Offsets struct { + // How frequently to commit updated offsets. Defaults to 1s. + CommitInterval time.Duration + + // The initial offset to use if no offset was previously committed. + // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. + Initial int64 + + // The retention duration for committed offsets. If zero, disabled + // (in which case the `offsets.retention.minutes` option on the + // broker will be used). Kafka only supports precision up to + // milliseconds; nanoseconds will be truncated. Requires Kafka + // broker version 0.9.0 or later. + // (default is 0: disabled). + Retention time.Duration + } + } + + // A user-provided string sent with every request to the brokers for logging, + // debugging, and auditing purposes. Defaults to "sarama", but you should + // probably set it to something specific to your application. + ClientID string + // The number of events to buffer in internal and external channels. This + // permits the producer and consumer to continue processing some messages + // in the background while user code is working, greatly improving throughput. + // Defaults to 256. + ChannelBufferSize int + // The version of Kafka that Sarama will assume it is running against. + // Defaults to the oldest supported stable version. Since Kafka provides + // backwards-compatibility, setting it to a version older than you have + // will not break anything, although it may prevent you from using the + // latest features. Setting it to a version greater than you are actually + // running may lead to random breakage. + Version KafkaVersion + // The registry to define metrics into. + // Defaults to a local registry. + // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true" + // prior to starting Sarama. + // See Examples on how to use the metrics registry + MetricRegistry metrics.Registry +} + +// NewConfig returns a new configuration instance with sane defaults. 
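+//
+// Callers typically tweak a few fields on the result before handing it to a
+// constructor, e.g. (an illustrative sketch; the address is hypothetical):
+//
+//	conf := sarama.NewConfig()
+//	conf.ClientID = "my-service"
+//	conf.Producer.Return.Successes = true // required by SyncProducer
+//	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, conf)
+//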
+func NewConfig() *Config { + c := &Config{} + + c.Net.MaxOpenRequests = 5 + c.Net.DialTimeout = 30 * time.Second + c.Net.ReadTimeout = 30 * time.Second + c.Net.WriteTimeout = 30 * time.Second + c.Net.SASL.Handshake = true + + c.Metadata.Retry.Max = 3 + c.Metadata.Retry.Backoff = 250 * time.Millisecond + c.Metadata.RefreshFrequency = 10 * time.Minute + c.Metadata.Full = true + + c.Producer.MaxMessageBytes = 1000000 + c.Producer.RequiredAcks = WaitForLocal + c.Producer.Timeout = 10 * time.Second + c.Producer.Partitioner = NewHashPartitioner + c.Producer.Retry.Max = 3 + c.Producer.Retry.Backoff = 100 * time.Millisecond + c.Producer.Return.Errors = true + c.Producer.CompressionLevel = CompressionLevelDefault + + c.Consumer.Fetch.Min = 1 + c.Consumer.Fetch.Default = 1024 * 1024 + c.Consumer.Retry.Backoff = 2 * time.Second + c.Consumer.MaxWaitTime = 250 * time.Millisecond + c.Consumer.MaxProcessingTime = 100 * time.Millisecond + c.Consumer.Return.Errors = false + c.Consumer.Offsets.CommitInterval = 1 * time.Second + c.Consumer.Offsets.Initial = OffsetNewest + + c.ClientID = defaultClientID + c.ChannelBufferSize = 256 + c.Version = MinVersion + c.MetricRegistry = metrics.NewRegistry() + + return c +} + +// Validate checks a Config instance. It will return a +// ConfigurationError if the specified values don't make sense. +func (c *Config) Validate() error { + // some configuration values should be warned on but not fail completely, do those first + if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { + Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") + } + if c.Net.SASL.Enable == false { + if c.Net.SASL.User != "" { + Logger.Println("Net.SASL is disabled but a non-empty username was provided.") + } + if c.Net.SASL.Password != "" { + Logger.Println("Net.SASL is disabled but a non-empty password was provided.") + } + } + if c.Producer.RequiredAcks > 1 { + Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") + } + if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { + Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.") + } + if c.Producer.Flush.Bytes >= int(MaxRequestSize) { + Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.") + } + if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { + Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.") + } + if c.Producer.Timeout%time.Millisecond != 0 { + Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") + } + if c.Consumer.MaxWaitTime < 100*time.Millisecond { + Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. 
See documentation for details.") + } + if c.Consumer.MaxWaitTime%time.Millisecond != 0 { + Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Offsets.Retention%time.Millisecond != 0 { + Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") + } + if c.ClientID == defaultClientID { + Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") + } + + // validate Net values + switch { + case c.Net.MaxOpenRequests <= 0: + return ConfigurationError("Net.MaxOpenRequests must be > 0") + case c.Net.DialTimeout <= 0: + return ConfigurationError("Net.DialTimeout must be > 0") + case c.Net.ReadTimeout <= 0: + return ConfigurationError("Net.ReadTimeout must be > 0") + case c.Net.WriteTimeout <= 0: + return ConfigurationError("Net.WriteTimeout must be > 0") + case c.Net.KeepAlive < 0: + return ConfigurationError("Net.KeepAlive must be >= 0") + case c.Net.SASL.Enable == true && c.Net.SASL.User == "": + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + + // validate the Metadata values + switch { + case c.Metadata.Retry.Max < 0: + return ConfigurationError("Metadata.Retry.Max must be >= 0") + case c.Metadata.Retry.Backoff < 0: + return ConfigurationError("Metadata.Retry.Backoff must be >= 0") + case c.Metadata.RefreshFrequency < 0: + return ConfigurationError("Metadata.RefreshFrequency must be >= 0") + } + + // validate the Producer values + switch { + case c.Producer.MaxMessageBytes <= 0: + return ConfigurationError("Producer.MaxMessageBytes must be > 0") + case c.Producer.RequiredAcks < -1: + return ConfigurationError("Producer.RequiredAcks must be >= -1") + case c.Producer.Timeout <= 0: + return ConfigurationError("Producer.Timeout must be > 0") + case c.Producer.Partitioner == nil: + return ConfigurationError("Producer.Partitioner must not be nil") + case c.Producer.Flush.Bytes < 0: + return ConfigurationError("Producer.Flush.Bytes must be >= 0") + case c.Producer.Flush.Messages < 0: + return ConfigurationError("Producer.Flush.Messages must be >= 0") + case c.Producer.Flush.Frequency < 0: + return ConfigurationError("Producer.Flush.Frequency must be >= 0") + case c.Producer.Flush.MaxMessages < 0: + return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") + case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: + return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") + case c.Producer.Retry.Max < 0: + return ConfigurationError("Producer.Retry.Max must be >= 0") + case c.Producer.Retry.Backoff < 0: + return ConfigurationError("Producer.Retry.Backoff must be >= 0") + } + + if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { + return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") + } + + if c.Producer.Compression == CompressionGZIP { + if c.Producer.CompressionLevel != CompressionLevelDefault { + if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil { + return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err)) + } + } + } + + // validate the Consumer values + switch { + case 
c.Consumer.Fetch.Min <= 0: + return ConfigurationError("Consumer.Fetch.Min must be > 0") + case c.Consumer.Fetch.Default <= 0: + return ConfigurationError("Consumer.Fetch.Default must be > 0") + case c.Consumer.Fetch.Max < 0: + return ConfigurationError("Consumer.Fetch.Max must be >= 0") + case c.Consumer.MaxWaitTime < 1*time.Millisecond: + return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") + case c.Consumer.MaxProcessingTime <= 0: + return ConfigurationError("Consumer.MaxProcessingTime must be > 0") + case c.Consumer.Retry.Backoff < 0: + return ConfigurationError("Consumer.Retry.Backoff must be >= 0") + case c.Consumer.Offsets.CommitInterval <= 0: + return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") + case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: + return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") + + } + + // validate misc shared values + switch { + case c.ChannelBufferSize < 0: + return ConfigurationError("ChannelBufferSize must be >= 0") + case !validID.MatchString(c.ClientID): + return ConfigurationError("ClientID is invalid") + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go new file mode 100644 index 0000000000..848cc9c90c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -0,0 +1,15 @@ +package sarama + +type ConfigResourceType int8 + +// Taken from : +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes + +const ( + UnknownResource ConfigResourceType = 0 + AnyResource ConfigResourceType = 1 + TopicResource ConfigResourceType = 2 + GroupResource ConfigResourceType = 3 + ClusterResource ConfigResourceType = 4 + BrokerResource ConfigResourceType = 5 +) diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go new file mode 100644 index 0000000000..96226ac5bf --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -0,0 +1,808 @@ +package sarama + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// ConsumerMessage encapsulates a Kafka message returned by the consumer. +type ConsumerMessage struct { + Key, Value []byte + Topic string + Partition int32 + Offset int64 + Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp + BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp + Headers []*RecordHeader // only set if kafka is version 0.11+ +} + +// ConsumerError is what is provided to the user when an error occurs. +// It wraps an error and includes the topic and partition. +type ConsumerError struct { + Topic string + Partition int32 + Err error +} + +func (ce ConsumerError) Error() string { + return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) +} + +// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. +// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors +// when stopping. +type ConsumerErrors []*ConsumerError + +func (ce ConsumerErrors) Error() string { + return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) +} + +// Consumer manages PartitionConsumers which process Kafka messages from brokers. 
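+// A single Consumer multiplexes its PartitionConsumers over one reference-counted
+// brokerConsumer per broker, as the code below shows.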
You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
+// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
+// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+type Consumer interface {
+
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest.
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
+	Close() error
+}
+
+type consumer struct {
+	client    Client
+	conf      *Config
+	ownClient bool
+
+	lock            sync.Mutex
+	children        map[string]map[int32]*partitionConsumer
+	brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	c.(*consumer).ownClient = true
+	return c, nil
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
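+//
+// A minimal usage sketch (the broker address and topic name are illustrative
+// assumptions, not part of this package):
+//
+//	conf := NewConfig()
+//	conf.Consumer.Return.Errors = true
+//	client, err := NewClient([]string{"localhost:9092"}, conf) // NewClient also runs conf.Validate()
+//	if err != nil {
+//		panic(err)
+//	}
+//	consumer, err := NewConsumerFromClient(client)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer consumer.Close() // closes the consumer only; close the client separately
+//
+//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetOldest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer pc.Close()
+//	for msg := range pc.Messages() {
+//		fmt.Printf("offset %d: %q\n", msg.Offset, msg.Value)
+//	}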
+func NewConsumerFromClient(client Client) (Consumer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + c := &consumer{ + client: client, + conf: client.Config(), + children: make(map[string]map[int32]*partitionConsumer), + brokerConsumers: make(map[*Broker]*brokerConsumer), + } + + return c, nil +} + +func (c *consumer) Close() error { + if c.ownClient { + return c.client.Close() + } + return nil +} + +func (c *consumer) Topics() ([]string, error) { + return c.client.Topics() +} + +func (c *consumer) Partitions(topic string) ([]int32, error) { + return c.client.Partitions(topic) +} + +func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { + child := &partitionConsumer{ + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, + } + + if err := child.chooseStartingOffset(offset); err != nil { + return nil, err + } + + var leader *Broker + var err error + if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + return nil, err + } + + if err := c.addChild(child); err != nil { + return nil, err + } + + go withRecover(child.dispatcher) + go withRecover(child.responseFeeder) + + child.broker = c.refBrokerConsumer(leader) + child.broker.input <- child + + return child, nil +} + +func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { + c.lock.Lock() + defer c.lock.Unlock() + + hwms := make(map[string]map[int32]int64) + for topic, p := range c.children { + hwm := make(map[int32]int64, len(p)) + for partition, pc := range p { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +func (c *consumer) addChild(child *partitionConsumer) error { + c.lock.Lock() + defer c.lock.Unlock() + + topicChildren := c.children[child.topic] + if topicChildren == nil { + topicChildren = make(map[int32]*partitionConsumer) + c.children[child.topic] = topicChildren + } + + if topicChildren[child.partition] != nil { + return ConfigurationError("That topic/partition is already being consumed") + } + + topicChildren[child.partition] = child + return nil +} + +func (c *consumer) removeChild(child *partitionConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.children[child.topic], child.partition) +} + +func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { + c.lock.Lock() + defer c.lock.Unlock() + + bc := c.brokerConsumers[broker] + if bc == nil { + bc = c.newBrokerConsumer(broker) + c.brokerConsumers[broker] = bc + } + + bc.refs++ + + return bc +} + +func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + brokerWorker.refs-- + + if brokerWorker.refs == 0 { + close(brokerWorker.input) + if c.brokerConsumers[brokerWorker.broker] == brokerWorker { + delete(c.brokerConsumers, brokerWorker.broker) + } + } +} + +func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.brokerConsumers, brokerWorker.broker) +} + +// PartitionConsumer + +// PartitionConsumer processes Kafka messages from a given topic and partition. 
You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function, or Close, before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+	Close() error
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker.
+	Messages() <-chan *ConsumerMessage
+
+	// Errors returns a read channel of errors that occurred during consuming, if
+	// enabled. By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
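+	// For example, after receiving a message, a rough lag estimate is
+	//
+	//	lag := pc.HighWaterMarkOffset() - msg.Offset - 1
+	//
+	// (a sketch; pc and msg are assumed to be the PartitionConsumer and the
+	// message just read from its Messages channel).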
+ HighWaterMarkOffset() int64 +} + +type partitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + consumer *consumer + conf *Config + topic string + partition int32 + + broker *brokerConsumer + messages chan *ConsumerMessage + errors chan *ConsumerError + feeder chan *FetchResponse + + trigger, dying chan none + responseResult error + closeOnce sync.Once + + fetchSize int32 + offset int64 +} + +var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing + +func (child *partitionConsumer) sendError(err error) { + cErr := &ConsumerError{ + Topic: child.topic, + Partition: child.partition, + Err: err, + } + + if child.conf.Consumer.Return.Errors { + child.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (child *partitionConsumer) dispatcher() { + for range child.trigger { + select { + case <-child.dying: + close(child.trigger) + case <-time.After(child.conf.Consumer.Retry.Backoff): + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + child.broker = nil + } + + Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) + if err := child.dispatch(); err != nil { + child.sendError(err) + child.trigger <- none{} + } + } + } + + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + } + child.consumer.removeChild(child) + close(child.feeder) +} + +func (child *partitionConsumer) dispatch() error { + if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { + return err + } + + var leader *Broker + var err error + if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + return err + } + + child.broker = child.consumer.refBrokerConsumer(leader) + + child.broker.input <- child + + return nil +} + +func (child *partitionConsumer) chooseStartingOffset(offset int64) error { + newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) + if err != nil { + return err + } + oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) + if err != nil { + return err + } + + switch { + case offset == OffsetNewest: + child.offset = newestOffset + case offset == OffsetOldest: + child.offset = oldestOffset + case offset >= oldestOffset && offset <= newestOffset: + child.offset = offset + default: + return ErrOffsetOutOfRange + } + + return nil +} + +func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { + return child.messages +} + +func (child *partitionConsumer) Errors() <-chan *ConsumerError { + return child.errors +} + +func (child *partitionConsumer) AsyncClose() { + // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes + // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and + // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will + // also just close itself) + child.closeOnce.Do(func() { + close(child.dying) + }) +} + +func (child *partitionConsumer) Close() error { + child.AsyncClose() + + go withRecover(func() { + for range child.messages { + // drain + } + }) + + var errors ConsumerErrors + for err := range child.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (child *partitionConsumer) HighWaterMarkOffset() int64 { + return 
atomic.LoadInt64(&child.highWaterMarkOffset) +} + +func (child *partitionConsumer) responseFeeder() { + var msgs []*ConsumerMessage + expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) + firstAttempt := true + +feederLoop: + for response := range child.feeder { + msgs, child.responseResult = child.parseResponse(response) + + for i, msg := range msgs { + messageSelect: + select { + case child.messages <- msg: + firstAttempt = true + case <-expiryTicker.C: + if !firstAttempt { + child.responseResult = errTimedOut + child.broker.acks.Done() + for _, msg = range msgs[i:] { + child.messages <- msg + } + child.broker.input <- child + continue feederLoop + } else { + // current message has not been sent, return to select + // statement + firstAttempt = false + goto messageSelect + } + } + } + + child.broker.acks.Done() + } + + expiryTicker.Stop() + close(child.messages) + close(child.errors) +} + +func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + for _, msgBlock := range msgSet.Messages { + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + } + if offset < child.offset { + continue + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: msg.Msg.Timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } + } + if len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + for _, rec := range batch.Records { + offset := batch.FirstOffset + rec.OffsetDelta + if offset < child.offset { + continue + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: rec.Key, + Value: rec.Value, + Offset: offset, + Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), + Headers: rec.Headers, + }) + child.offset = offset + 1 + } + if len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + block := response.GetBlock(child.topic, child.partition) + if block == nil { + return nil, ErrIncompleteResponse + } + + if block.Err != ErrNoError { + return nil, block.Err + } + + nRecs, err := block.numRecords() + if err != nil { + return nil, err + } + if nRecs == 0 { + partialTrailingMessage, err := block.isPartial() + if err != nil { + return nil, err + } + // We got no messages. If we got a trailing one then we need to ask for more data. + // Otherwise we just poll again and wait for one to be produced... 
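+		// A partial trailing message with nothing complete in the block means the
+		// current fetch size cannot hold even one whole message, so the code below
+		// doubles fetchSize, capped at Consumer.Fetch.Max, before giving up with
+		// ErrMessageTooLarge.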
+ if partialTrailingMessage { + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { + // we can't ask for more data, we've hit the configured limit + child.sendError(ErrMessageTooLarge) + child.offset++ // skip this one so we can keep processing future messages + } else { + child.fetchSize *= 2 + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { + child.fetchSize = child.conf.Consumer.Fetch.Max + } + } + } + + return nil, nil + } + + // we got messages, reset our fetch size in case it was increased for a previous request + child.fetchSize = child.conf.Consumer.Fetch.Default + atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) + + messages := []*ConsumerMessage{} + for _, records := range block.RecordsSet { + if control, err := records.isControl(); err != nil || control { + continue + } + + switch records.recordsType { + case legacyRecords: + messageSetMessages, err := child.parseMessages(records.MsgSet) + if err != nil { + return nil, err + } + + messages = append(messages, messageSetMessages...) + case defaultRecords: + recordBatchMessages, err := child.parseRecords(records.RecordBatch) + if err != nil { + return nil, err + } + + messages = append(messages, recordBatchMessages...) + default: + return nil, fmt.Errorf("unknown records type: %v", records.recordsType) + } + } + + return messages, nil +} + +// brokerConsumer + +type brokerConsumer struct { + consumer *consumer + broker *Broker + input chan *partitionConsumer + newSubscriptions chan []*partitionConsumer + wait chan none + subscriptions map[*partitionConsumer]none + acks sync.WaitGroup + refs int +} + +func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { + bc := &brokerConsumer{ + consumer: c, + broker: broker, + input: make(chan *partitionConsumer), + newSubscriptions: make(chan []*partitionConsumer), + wait: make(chan none), + subscriptions: make(map[*partitionConsumer]none), + refs: 0, + } + + go withRecover(bc.subscriptionManager) + go withRecover(bc.subscriptionConsumer) + + return bc +} + +func (bc *brokerConsumer) subscriptionManager() { + var buffer []*partitionConsumer + + // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer + // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks + // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give + // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, + // so the main goroutine can block waiting for work if it has none. 
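+	// In the loop below, note that `wait` is only ever written while buffered
+	// subscriptions exist, and that an empty buffer is delivered as a nil send on
+	// `newSubscriptions`, letting the main goroutine proceed with no new work.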
+ for { + if len(buffer) > 0 { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- buffer: + buffer = nil + case bc.wait <- none{}: + } + } else { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- nil: + } + } + } + +done: + close(bc.wait) + if len(buffer) > 0 { + bc.newSubscriptions <- buffer + } + close(bc.newSubscriptions) +} + +func (bc *brokerConsumer) subscriptionConsumer() { + <-bc.wait // wait for our first piece of work + + // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available + for newSubscriptions := range bc.newSubscriptions { + bc.updateSubscriptions(newSubscriptions) + + if len(bc.subscriptions) == 0 { + // We're about to be shut down or we're about to receive more subscriptions. + // Either way, the signal just hasn't propagated to our goroutine yet. + <-bc.wait + continue + } + + response, err := bc.fetchNewMessages() + + if err != nil { + Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) + bc.abort(err) + return + } + + bc.acks.Add(len(bc.subscriptions)) + for child := range bc.subscriptions { + child.feeder <- response + } + bc.acks.Wait() + bc.handleResponses() + } +} + +func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { + for _, child := range newSubscriptions { + bc.subscriptions[child] = none{} + Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + } + + for child := range bc.subscriptions { + select { + case <-child.dying: + Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + close(child.trigger) + delete(bc.subscriptions, child) + default: + break + } + } +} + +func (bc *brokerConsumer) handleResponses() { + // handles the response codes left for us by our subscriptions, and abandons ones that have been closed + for child := range bc.subscriptions { + result := child.responseResult + child.responseResult = nil + + switch result { + case nil: + break + case errTimedOut: + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", + bc.broker.ID(), child.topic, child.partition) + delete(bc.subscriptions, child) + case ErrOffsetOutOfRange: + // there's no point in retrying this it will just fail the same way again + // shut it down and force the user to choose what to do + child.sendError(result) + Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) + close(child.trigger) + delete(bc.subscriptions, child) + case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: + // not an error, but does need redispatching + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + default: + // dunno, tell the user and try redispatching + child.sendError(result) + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } +} + +func (bc *brokerConsumer) abort(err error) { + bc.consumer.abandonBrokerConsumer(bc) + _ = 
bc.broker.Close() // we don't care about the error this might return, we already have one + + for child := range bc.subscriptions { + child.sendError(err) + child.trigger <- none{} + } + + for newSubscriptions := range bc.newSubscriptions { + if len(newSubscriptions) == 0 { + <-bc.wait + continue + } + for _, child := range newSubscriptions { + child.sendError(err) + child.trigger <- none{} + } + } +} + +func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { + request := &FetchRequest{ + MinBytes: bc.consumer.conf.Consumer.Fetch.Min, + MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 2 + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 3 + request.MaxBytes = MaxResponseSize + } + if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + request.Isolation = ReadUncommitted // We don't support yet transactions. + } + + for child := range bc.subscriptions { + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + } + + return bc.broker.Fetch(request) +} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go new file mode 100644 index 0000000000..9d92d350a5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -0,0 +1,94 @@ +package sarama + +type ConsumerGroupMemberMetadata struct { + Version int16 + Topics []string + UserData []byte +} + +func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putStringArray(m.Topics); err != nil { + return err + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + if m.Topics, err = pd.getStringArray(); err != nil { + return + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +type ConsumerGroupMemberAssignment struct { + Version int16 + Topics map[string][]int32 + UserData []byte +} + +func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go new file mode 100644 index 0000000000..4de45e7bf5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -0,0 +1,33 @@ +package sarama + +type 
ConsumerMetadataRequest struct { + ConsumerGroup string +} + +func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { + tmp := new(FindCoordinatorRequest) + tmp.CoordinatorKey = r.ConsumerGroup + tmp.CoordinatorType = CoordinatorGroup + return tmp.encode(pe) +} + +func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { + tmp := new(FindCoordinatorRequest) + if err := tmp.decode(pd, version); err != nil { + return err + } + r.ConsumerGroup = tmp.CoordinatorKey + return nil +} + +func (r *ConsumerMetadataRequest) key() int16 { + return 10 +} + +func (r *ConsumerMetadataRequest) version() int16 { + return 0 +} + +func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go new file mode 100644 index 0000000000..442cbde7ac --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -0,0 +1,77 @@ +package sarama + +import ( + "net" + "strconv" +) + +type ConsumerMetadataResponse struct { + Err KError + Coordinator *Broker + CoordinatorID int32 // deprecated: use Coordinator.ID() + CoordinatorHost string // deprecated: use Coordinator.Addr() + CoordinatorPort int32 // deprecated: use Coordinator.Addr() +} + +func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { + tmp := new(FindCoordinatorResponse) + + if err := tmp.decode(pd, version); err != nil { + return err + } + + r.Err = tmp.Err + + r.Coordinator = tmp.Coordinator + if tmp.Coordinator == nil { + return nil + } + + // this can all go away in 2.0, but we have to fill in deprecated fields to maintain + // backwards compatibility + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + r.CoordinatorID = r.Coordinator.ID() + r.CoordinatorHost = host + r.CoordinatorPort = int32(port) + + return nil +} + +func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { + if r.Coordinator == nil { + r.Coordinator = new(Broker) + r.Coordinator.id = r.CoordinatorID + r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort))) + } + + tmp := &FindCoordinatorResponse{ + Version: 0, + Err: r.Err, + Coordinator: r.Coordinator, + } + + if err := tmp.encode(pe); err != nil { + return err + } + + return nil +} + +func (r *ConsumerMetadataResponse) key() int16 { + return 10 +} + +func (r *ConsumerMetadataResponse) version() int16 { + return 0 +} + +func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go new file mode 100644 index 0000000000..1f144431a8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -0,0 +1,69 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "hash/crc32" +) + +type crcPolynomial int8 + +const ( + crcIEEE crcPolynomial = iota + crcCastagnoli +) + +var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) + +// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. 
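+//
+// It reserves 4 bytes at startOffset when encoding begins; run() then computes
+// the checksum over everything written after that slot
+// (buf[startOffset+4:curOffset]) and stores it back into the reserved space,
+// while check() recomputes it on decode and compares.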
+type crc32Field struct { + startOffset int + polynomial crcPolynomial +} + +func (c *crc32Field) saveOffset(in int) { + c.startOffset = in +} + +func (c *crc32Field) reserveLength() int { + return 4 +} + +func newCRC32Field(polynomial crcPolynomial) *crc32Field { + return &crc32Field{polynomial: polynomial} +} + +func (c *crc32Field) run(curOffset int, buf []byte) error { + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } + binary.BigEndian.PutUint32(buf[c.startOffset:], crc) + return nil +} + +func (c *crc32Field) check(curOffset int, buf []byte) error { + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } + + expected := binary.BigEndian.Uint32(buf[c.startOffset:]) + if crc != expected { + return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} + } + + return nil +} +func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { + var tab *crc32.Table + switch c.polynomial { + case crcIEEE: + tab = crc32.IEEETable + case crcCastagnoli: + tab = castagnoliTable + default: + return 0, PacketDecodingError{"invalid CRC type"} + } + return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go new file mode 100644 index 0000000000..af321e9946 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -0,0 +1,121 @@ +package sarama + +import "time" + +type CreatePartitionsRequest struct { + TopicPartitions map[string]*TopicPartition + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { + return err + } + + for topic, partition := range c.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := partition.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + pe.putBool(c.ValidateOnly) + + return nil +} + +func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + c.TopicPartitions = make(map[string]*TopicPartition, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitions[topic] = new(TopicPartition) + if err := c.TopicPartitions[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if c.ValidateOnly, err = pd.getBool(); err != nil { + return err + } + + return nil +} + +func (r *CreatePartitionsRequest) key() int16 { + return 37 +} + +func (r *CreatePartitionsRequest) version() int16 { + return 0 +} + +func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartition struct { + Count int32 + Assignment [][]int32 +} + +func (t *TopicPartition) encode(pe packetEncoder) error { + pe.putInt32(t.Count) + + if len(t.Assignment) == 0 { + pe.putInt32(-1) + return nil + } + + if err := pe.putArrayLength(len(t.Assignment)); err != nil { + return err + } + + for _, assign := range t.Assignment { + if err := pe.putInt32Array(assign); err != nil { + return err + } + } + + return nil +} + +func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { + if t.Count, err = pd.getInt32(); err != nil { 
+ return err + } + + n, err := pd.getInt32() + if err != nil { + return err + } + if n <= 0 { + return nil + } + t.Assignment = make([][]int32, n) + + for i := 0; i < int(n); i++ { + if t.Assignment[i], err = pd.getInt32Array(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go new file mode 100644 index 0000000000..abd621c64e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +type CreatePartitionsResponse struct { + ThrottleTime time.Duration + TopicPartitionErrors map[string]*TopicPartitionError +} + +func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { + return err + } + + for topic, partitionError := range c.TopicPartitionErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := partitionError.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitionErrors[topic] = new(TopicPartitionError) + if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (r *CreatePartitionsResponse) key() int16 { + return 37 +} + +func (r *CreatePartitionsResponse) version() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartitionError struct { + Err KError + ErrMsg *string +} + +func (t *TopicPartitionError) encode(pe packetEncoder) error { + pe.putInt16(int16(t.Err)) + + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + + return nil +} + +func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kerr) + + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go new file mode 100644 index 0000000000..709c0a44e7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -0,0 +1,174 @@ +package sarama + +import ( + "time" +) + +type CreateTopicsRequest struct { + Version int16 + + TopicDetails map[string]*TopicDetail + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreateTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { + return err + } + for topic, detail := range c.TopicDetails { + if err := pe.putString(topic); err != nil { + return err + } + if err := detail.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + if c.Version >= 1 { + pe.putBool(c.ValidateOnly) + } + + return nil +} + +func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) 
{ + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicDetails = make(map[string]*TopicDetail, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicDetails[topic] = new(TopicDetail) + if err = c.TopicDetails[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if version >= 1 { + c.ValidateOnly, err = pd.getBool() + if err != nil { + return err + } + + c.Version = version + } + + return nil +} + +func (c *CreateTopicsRequest) key() int16 { + return 19 +} + +func (c *CreateTopicsRequest) version() int16 { + return c.Version +} + +func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicDetail struct { + NumPartitions int32 + ReplicationFactor int16 + ReplicaAssignment map[int32][]int32 + ConfigEntries map[string]*string +} + +func (t *TopicDetail) encode(pe packetEncoder) error { + pe.putInt32(t.NumPartitions) + pe.putInt16(t.ReplicationFactor) + + if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { + return err + } + for partition, assignment := range t.ReplicaAssignment { + pe.putInt32(partition) + if err := pe.putInt32Array(assignment); err != nil { + return err + } + } + + if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range t.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { + if t.NumPartitions, err = pd.getInt32(); err != nil { + return err + } + if t.ReplicationFactor, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ReplicaAssignment = make(map[int32][]int32, n) + for i := 0; i < n; i++ { + replica, err := pd.getInt32() + if err != nil { + return err + } + if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { + return err + } + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go new file mode 100644 index 0000000000..66207e00c5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -0,0 +1,112 @@ +package sarama + +import "time" + +type CreateTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrors map[string]*TopicError +} + +func (c *CreateTopicsResponse) encode(pe packetEncoder) error { + if c.Version >= 2 { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { + return err + } + for topic, topicError := range c.TopicErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := topicError.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func 
(c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + + if version >= 2 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicErrors = make(map[string]*TopicError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicErrors[topic] = new(TopicError) + if err := c.TopicErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) key() int16 { + return 19 +} + +func (c *CreateTopicsResponse) version() int16 { + return c.Version +} + +func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicError struct { + Err KError + ErrMsg *string +} + +func (t *TopicError) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(t.Err)) + + if version >= 1 { + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + } + + return nil +} + +func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + if version >= 1 { + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go new file mode 100644 index 0000000000..305a324ac2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go @@ -0,0 +1,30 @@ +package sarama + +type DeleteGroupsRequest struct { + Groups []string +} + +func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DeleteGroupsRequest) key() int16 { + return 42 +} + +func (r *DeleteGroupsRequest) version() int16 { + return 0 +} + +func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { + return V1_1_0_0 +} + +func (r *DeleteGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go new file mode 100644 index 0000000000..c067ebb42b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go @@ -0,0 +1,70 @@ +package sarama + +import ( + "time" +) + +type DeleteGroupsResponse struct { + ThrottleTime time.Duration + GroupErrorCodes map[string]KError +} + +func (r *DeleteGroupsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil { + return err + } + for groupID, errorCode := range r.GroupErrorCodes { + if err := pe.putString(groupID); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + 
r.GroupErrorCodes = make(map[string]KError, n) + for i := 0; i < n; i++ { + groupID, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + r.GroupErrorCodes[groupID] = KError(errorCode) + } + + return nil +} + +func (r *DeleteGroupsResponse) key() int16 { + return 42 +} + +func (r *DeleteGroupsResponse) version() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { + return V1_1_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go new file mode 100644 index 0000000000..93efafd4d0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_request.go @@ -0,0 +1,126 @@ +package sarama + +import ( + "sort" + "time" +) + +// request message format is: +// [topic] timeout(int32) +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) offset(int64) + +type DeleteRecordsRequest struct { + Topics map[string]*DeleteRecordsRequestTopic + Timeout time.Duration +} + +func (d *DeleteRecordsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsRequestTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsRequestTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (d *DeleteRecordsRequest) key() int16 { + return 21 +} + +func (d *DeleteRecordsRequest) version() int16 { + return 0 +} + +func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsRequestTopic struct { + PartitionOffsets map[int32]int64 // partition => offset +} + +func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil { + return err + } + keys := make([]int32, 0, len(t.PartitionOffsets)) + for partition := range t.PartitionOffsets { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + pe.putInt64(t.PartitionOffsets[partition]) + } + return nil +} + +func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.PartitionOffsets = make(map[int32]int64, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + offset, err := pd.getInt64() + if err != nil { + return err + } + t.PartitionOffsets[partition] = offset + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go 
b/vendor/github.com/Shopify/sarama/delete_records_response.go new file mode 100644 index 0000000000..733a58b6bc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_response.go @@ -0,0 +1,158 @@ +package sarama + +import ( + "sort" + "time" +) + +// response message format is: +// throttleMs(int32) [topic] +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) low_watermark(int64) error_code(int16) + +type DeleteRecordsResponse struct { + Version int16 + ThrottleTime time.Duration + Topics map[string]*DeleteRecordsResponseTopic +} + +func (d *DeleteRecordsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + return nil +} + +func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error { + d.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsResponseTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsResponseTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + return nil +} + +func (d *DeleteRecordsResponse) key() int16 { + return 21 +} + +func (d *DeleteRecordsResponse) version() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsResponseTopic struct { + Partitions map[int32]*DeleteRecordsResponsePartition +} + +func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.Partitions)); err != nil { + return err + } + keys := make([]int32, 0, len(t.Partitions)) + for partition := range t.Partitions { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + if err := t.Partitions[partition].encode(pe); err != nil { + return err + } + } + return nil +} + +func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + details := new(DeleteRecordsResponsePartition) + if err = details.decode(pd, version); err != nil { + return err + } + t.Partitions[partition] = details + } + } + + return nil +} + +type DeleteRecordsResponsePartition struct { + LowWatermark int64 + Err KError +} + +func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error { + pe.putInt64(t.LowWatermark) + pe.putInt16(int16(t.Err)) + return nil +} + +func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error { + lowWatermark, err := pd.getInt64() + if err != nil { + return err + } + t.LowWatermark = lowWatermark + + kErr, err := pd.getInt16() 
+ if err != nil { + return err + } + t.Err = KError(kErr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go new file mode 100644 index 0000000000..911f67d31b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -0,0 +1,48 @@ +package sarama + +import "time" + +type DeleteTopicsRequest struct { + Version int16 + Topics []string + Timeout time.Duration +} + +func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putStringArray(d.Topics); err != nil { + return err + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + if d.Topics, err = pd.getStringArray(); err != nil { + return err + } + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + d.Version = version + return nil +} + +func (d *DeleteTopicsRequest) key() int16 { + return 20 +} + +func (d *DeleteTopicsRequest) version() int16 { + return d.Version +} + +func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go new file mode 100644 index 0000000000..34225460a3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -0,0 +1,78 @@ +package sarama + +import "time" + +type DeleteTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrorCodes map[string]KError +} + +func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { + if d.Version >= 1 { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { + return err + } + for topic, errorCode := range d.TopicErrorCodes { + if err := pe.putString(topic); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + d.Version = version + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.TopicErrorCodes = make(map[string]KError, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + d.TopicErrorCodes[topic] = KError(errorCode) + } + + return nil +} + +func (d *DeleteTopicsResponse) key() int16 { + return 20 +} + +func (d *DeleteTopicsResponse) version() int16 { + return d.Version +} + +func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go new file mode 100644 index 0000000000..7a7cffc3fb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -0,0 +1,91 @@ +package sarama + +type ConfigResource struct { + Type ConfigResourceType + Name string + ConfigNames []string +} + +type DescribeConfigsRequest struct { + Resources []*ConfigResource +} + +func (r 
*DescribeConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + pe.putInt8(int8(c.Type)) + if err := pe.putString(c.Name); err != nil { + return err + } + + if len(c.ConfigNames) == 0 { + pe.putInt32(-1) + continue + } + if err := pe.putStringArray(c.ConfigNames); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ConfigResource, n) + + for i := 0; i < n; i++ { + r.Resources[i] = &ConfigResource{} + t, err := pd.getInt8() + if err != nil { + return err + } + r.Resources[i].Type = ConfigResourceType(t) + name, err := pd.getString() + if err != nil { + return err + } + r.Resources[i].Name = name + + confLength, err := pd.getArrayLength() + + if err != nil { + return err + } + + if confLength == -1 { + continue + } + + cfnames := make([]string, confLength) + for i := 0; i < confLength; i++ { + s, err := pd.getString() + if err != nil { + return err + } + cfnames[i] = s + } + r.Resources[i].ConfigNames = cfnames + } + + return nil +} + +func (r *DescribeConfigsRequest) key() int16 { + return 32 +} + +func (r *DescribeConfigsRequest) version() int16 { + return 0 +} + +func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go new file mode 100644 index 0000000000..6e5d30e4f0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -0,0 +1,188 @@ +package sarama + +import "time" + +type DescribeConfigsResponse struct { + ThrottleTime time.Duration + Resources []*ResourceResponse +} + +type ResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string + Configs []*ConfigEntry +} + +type ConfigEntry struct { + Name string + Value string + ReadOnly bool + Default bool + Sensitive bool +} + +func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + if err = pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + if err = c.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ResourceResponse, n) + for i := 0; i < n; i++ { + rr := &ResourceResponse{} + if err := rr.decode(pd, version); err != nil { + return err + } + r.Resources[i] = rr + } + + return nil +} + +func (r *DescribeConfigsResponse) key() int16 { + return 32 +} + +func (r *DescribeConfigsResponse) version() int16 { + return 0 +} + +func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +func (r *ResourceResponse) encode(pe packetEncoder) (err error) { + pe.putInt16(r.ErrorCode) + + if err = pe.putString(r.ErrorMsg); err != nil { + return err + } + + pe.putInt8(int8(r.Type)) + + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putArrayLength(len(r.Configs)); err != nil { + return err + } + + for _, c := range 
r.Configs { + if err = c.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { + ec, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = ec + + em, err := pd.getString() + if err != nil { + return err + } + r.ErrorMsg = em + + t, err := pd.getInt8() + if err != nil { + return err + } + r.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Configs = make([]*ConfigEntry, n) + for i := 0; i < n; i++ { + c := &ConfigEntry{} + if err := c.decode(pd, version); err != nil { + return err + } + r.Configs[i] = c + } + return nil +} + +func (r *ConfigEntry) encode(pe packetEncoder) (err error) { + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putString(r.Value); err != nil { + return err + } + + pe.putBool(r.ReadOnly) + pe.putBool(r.Default) + pe.putBool(r.Sensitive) + return nil +} + +func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + value, err := pd.getString() + if err != nil { + return err + } + r.Value = value + + read, err := pd.getBool() + if err != nil { + return err + } + r.ReadOnly = read + + de, err := pd.getBool() + if err != nil { + return err + } + r.Default = de + + sensitive, err := pd.getBool() + if err != nil { + return err + } + r.Sensitive = sensitive + return nil +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go new file mode 100644 index 0000000000..1fb3567770 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -0,0 +1,30 @@ +package sarama + +type DescribeGroupsRequest struct { + Groups []string +} + +func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DescribeGroupsRequest) key() int16 { + return 15 +} + +func (r *DescribeGroupsRequest) version() int16 { + return 0 +} + +func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *DescribeGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go new file mode 100644 index 0000000000..542b3a9717 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -0,0 +1,187 @@ +package sarama + +type DescribeGroupsResponse struct { + Groups []*GroupDescription +} + +func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + + for _, groupDescription := range r.Groups { + if err := groupDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Groups = make([]*GroupDescription, n) + for i := 0; i < n; i++ { + r.Groups[i] = new(GroupDescription) + if err := r.Groups[i].decode(pd); err != nil { + return err + } + } + + return nil +} + +func (r 
*DescribeGroupsResponse) key() int16 { + return 15 +} + +func (r *DescribeGroupsResponse) version() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +type GroupDescription struct { + Err KError + GroupId string + State string + ProtocolType string + Protocol string + Members map[string]*GroupMemberDescription +} + +func (gd *GroupDescription) encode(pe packetEncoder) error { + pe.putInt16(int16(gd.Err)) + + if err := pe.putString(gd.GroupId); err != nil { + return err + } + if err := pe.putString(gd.State); err != nil { + return err + } + if err := pe.putString(gd.ProtocolType); err != nil { + return err + } + if err := pe.putString(gd.Protocol); err != nil { + return err + } + + if err := pe.putArrayLength(len(gd.Members)); err != nil { + return err + } + + for memberId, groupMemberDescription := range gd.Members { + if err := pe.putString(memberId); err != nil { + return err + } + if err := groupMemberDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (gd *GroupDescription) decode(pd packetDecoder) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + gd.Err = KError(kerr) + + if gd.GroupId, err = pd.getString(); err != nil { + return + } + if gd.State, err = pd.getString(); err != nil { + return + } + if gd.ProtocolType, err = pd.getString(); err != nil { + return + } + if gd.Protocol, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + gd.Members = make(map[string]*GroupMemberDescription) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + gd.Members[memberId] = new(GroupMemberDescription) + if err := gd.Members[memberId].decode(pd); err != nil { + return err + } + } + + return nil +} + +type GroupMemberDescription struct { + ClientId string + ClientHost string + MemberMetadata []byte + MemberAssignment []byte +} + +func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { + if err := pe.putString(gmd.ClientId); err != nil { + return err + } + if err := pe.putString(gmd.ClientHost); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberMetadata); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberAssignment); err != nil { + return err + } + + return nil +} + +func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { + if gmd.ClientId, err = pd.getString(); err != nil { + return + } + if gmd.ClientHost, err = pd.getString(); err != nil { + return + } + if gmd.MemberMetadata, err = pd.getBytes(); err != nil { + return + } + if gmd.MemberAssignment, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(gmd.MemberAssignment, assignment) + return assignment, err +} + +func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + metadata := new(ConsumerGroupMemberMetadata) + err := decode(gmd.MemberMetadata, metadata) + return metadata, err +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml new file mode 100644 index 0000000000..294fcdb413 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -0,0 +1,10 @@ +name: sarama + +up: + - go: + version: '1.9' + +commands: + test: + run: make test + desc: 'run 
unit tests' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go new file mode 100644 index 0000000000..7ce3bc0f6e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -0,0 +1,89 @@ +package sarama + +import ( + "fmt" + + "github.com/rcrowley/go-metrics" +) + +// Encoder is the interface that wraps the basic Encode method. +// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. +type encoder interface { + encode(pe packetEncoder) error +} + +// Encode takes an Encoder and turns it into bytes while potentially recording metrics. +func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { + if e == nil { + return nil, nil + } + + var prepEnc prepEncoder + var realEnc realEncoder + + err := e.encode(&prepEnc) + if err != nil { + return nil, err + } + + if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { + return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} + } + + realEnc.raw = make([]byte, prepEnc.length) + realEnc.registry = metricRegistry + err = e.encode(&realEnc) + if err != nil { + return nil, err + } + + return realEnc.raw, nil +} + +// Decoder is the interface that wraps the basic Decode method. +// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. +type decoder interface { + decode(pd packetDecoder) error +} + +type versionedDecoder interface { + decode(pd packetDecoder, version int16) error +} + +// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, +// interpreted using Kafka's encoding rules. +func decode(buf []byte, in decoder) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} + +func versionedDecode(buf []byte, in versionedDecoder, version int16) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper, version) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go new file mode 100644 index 0000000000..2cd9b506d3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -0,0 +1,50 @@ +package sarama + +type EndTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TransactionResult bool +} + +func (a *EndTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + pe.putBool(a.TransactionResult) + + return nil +} + +func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.TransactionResult, err = pd.getBool(); err != nil { + return err + } + return nil +} + +func (a *EndTxnRequest) key() int16 { + return 26 +} + +func (a *EndTxnRequest) version() int16 { + return 0 +} + +func (a *EndTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git 
a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go new file mode 100644 index 0000000000..33b27e33d4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -0,0 +1,44 @@ +package sarama + +import ( + "time" +) + +type EndTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (e *EndTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(e.Err)) + return nil +} + +func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + e.Err = KError(kerr) + + return nil +} + +func (e *EndTxnResponse) key() int16 { + return 25 +} + +func (e *EndTxnResponse) version() int16 { + return 0 +} + +func (e *EndTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go new file mode 100644 index 0000000000..c578ef5fb4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -0,0 +1,281 @@ +package sarama + +import ( + "errors" + "fmt" +) + +// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored +// or otherwise failed to respond. +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") + +// ErrClosedClient is the error returned when a method is called on a client that has been closed. +var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") + +// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does +// not contain the expected information. +var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") + +// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index +// (meaning one outside of the range [0...numPartitions-1]). +var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") + +// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. +var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") + +// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. +var ErrNotConnected = errors.New("kafka: broker not connected") + +// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected +// when requesting messages, since as an optimization the server is allowed to return a partial message at the end +// of the message set. +var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") + +// ErrShuttingDown is returned when a producer receives a message during shutdown. 
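+// Such a message is not sent; it is returned to the caller with this error.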
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server did not return a correct controller id. This may happen
+// if the Kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+	Info string
+}
+
+func (err PacketEncodingError) Error() string {
+	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+	Info string
+}
+
+func (err PacketDecodingError) Error() string {
+	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+	return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
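+//
+// A caller typically converts a raw error code read from the wire and
+// compares it against these constants. A minimal sketch (assuming `code`
+// holds an int16 already read by a packetDecoder):
+//
+//	if kerr := KError(code); kerr != ErrNoError {
+//		return kerr
+//	}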
+const ( + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + ErrUnsupportedVersion KError = 35 + ErrTopicAlreadyExists KError = 36 + ErrInvalidPartitions KError = 37 + ErrInvalidReplicationFactor KError = 38 + ErrInvalidReplicaAssignment KError = 39 + ErrInvalidConfig KError = 40 + ErrNotController KError = 41 + ErrInvalidRequest KError = 42 + ErrUnsupportedForMessageFormat KError = 43 + ErrPolicyViolation KError = 44 + ErrOutOfOrderSequenceNumber KError = 45 + ErrDuplicateSequenceNumber KError = 46 + ErrInvalidProducerEpoch KError = 47 + ErrInvalidTxnState KError = 48 + ErrInvalidProducerIDMapping KError = 49 + ErrInvalidTransactionTimeout KError = 50 + ErrConcurrentTransactions KError = 51 + ErrTransactionCoordinatorFenced KError = 52 + ErrTransactionalIDAuthorizationFailed KError = 53 + ErrSecurityDisabled KError = 54 + ErrOperationNotAttempted KError = 55 + ErrKafkaStorageError KError = 56 + ErrLogDirNotFound KError = 57 + ErrSASLAuthenticationFailed KError = 58 + ErrUnknownProducerID KError = 59 + ErrReassignmentInProgress KError = 60 +) + +func (err KError) Error() string { + // Error messages stolen/adapted from + // https://kafka.apache.org/protocol#protocol_error_codes + switch err { + case ErrNoError: + return "kafka server: Not an error, why are you printing me?" + case ErrUnknown: + return "kafka server: Unexpected (unknown?) server error." + case ErrOffsetOutOfRange: + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + case ErrInvalidMessage: + return "kafka server: Message contents does not match its CRC." + case ErrUnknownTopicOrPartition: + return "kafka server: Request was for a topic or partition that does not exist on this broker." + case ErrInvalidMessageSize: + return "kafka server: The message has a negative size." + case ErrLeaderNotAvailable: + return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." + case ErrNotLeaderForPartition: + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." 
+	case ErrRequestTimedOut:
+		return "kafka server: Request exceeded the user-specified time limit in the request."
+	case ErrBrokerNotAvailable:
+		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+	case ErrReplicaNotAvailable:
+		return "kafka server: Replica information not available, one or more brokers are down."
+	case ErrMessageSizeTooLarge:
+		return "kafka server: Message was too large, server rejected it to avoid an allocation error."
+	case ErrStaleControllerEpochCode:
+		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received."
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: Offset's topic has not yet been created."
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic."
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included message batch larger than the configured segment size on the server."
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation."
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provided group protocol type is incompatible with the other members."
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty."
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation."
+	case ErrInvalidSessionTimeout:
+		return "kafka server: The provided session timeout is outside the allowed range."
+	case ErrRebalanceInProgress:
+		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+	case ErrInvalidCommitOffsetSize:
+		return "kafka server: The provided commit metadata was too large."
+	case ErrTopicAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this topic."
+	case ErrGroupAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this group."
+	case ErrClusterAuthorizationFailed:
+		return "kafka server: The client is not authorized to send this request type."
+	case ErrInvalidTimestamp:
+		return "kafka server: The timestamp of the message is out of acceptable range."
+	case ErrUnsupportedSASLMechanism:
+		return "kafka server: The broker does not support the requested SASL mechanism."
+	case ErrIllegalSASLState:
+		return "kafka server: Request is not valid given the current SASL state."
+	case ErrUnsupportedVersion:
+		return "kafka server: The version of the API is not supported."
+ case ErrTopicAlreadyExists: + return "kafka server: Topic with this name already exists." + case ErrInvalidPartitions: + return "kafka server: Number of partitions is invalid." + case ErrInvalidReplicationFactor: + return "kafka server: Replication-factor is invalid." + case ErrInvalidReplicaAssignment: + return "kafka server: Replica assignment is invalid." + case ErrInvalidConfig: + return "kafka server: Configuration is invalid." + case ErrNotController: + return "kafka server: This is not the correct controller for this cluster." + case ErrInvalidRequest: + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." + case ErrUnsupportedForMessageFormat: + return "kafka server: The requested operation is not supported by the message format version." + case ErrPolicyViolation: + return "kafka server: Request parameters do not satisfy the configured policy." + case ErrOutOfOrderSequenceNumber: + return "kafka server: The broker received an out of order sequence number." + case ErrDuplicateSequenceNumber: + return "kafka server: The broker received a duplicate sequence number." + case ErrInvalidProducerEpoch: + return "kafka server: Producer attempted an operation with an old epoch." + case ErrInvalidTxnState: + return "kafka server: The producer attempted a transactional operation in an invalid state." + case ErrInvalidProducerIDMapping: + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + case ErrInvalidTransactionTimeout: + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + case ErrConcurrentTransactions: + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + case ErrTransactionCoordinatorFenced: + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + case ErrTransactionalIDAuthorizationFailed: + return "kafka server: Transactional ID authorization failed." + case ErrSecurityDisabled: + return "kafka server: Security features are disabled." + case ErrOperationNotAttempted: + return "kafka server: The broker did not attempt to execute this operation." + case ErrKafkaStorageError: + return "kafka server: Disk error when trying to access log file on the disk." + case ErrLogDirNotFound: + return "kafka server: The specified log directory is not found in the broker config." + case ErrSASLAuthenticationFailed: + return "kafka server: SASL Authentication failed." + case ErrUnknownProducerID: + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + case ErrReassignmentInProgress: + return "kafka server: A partition reassignment is in progress." + } + + return fmt.Sprintf("Unknown error, how did this happen? 
Error code = %d", err) +} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go new file mode 100644 index 0000000000..462ab8afbb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -0,0 +1,170 @@ +package sarama + +type fetchRequestBlock struct { + fetchOffset int64 + maxBytes int32 +} + +func (b *fetchRequestBlock) encode(pe packetEncoder) error { + pe.putInt64(b.fetchOffset) + pe.putInt32(b.maxBytes) + return nil +} + +func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { + if b.fetchOffset, err = pd.getInt64(); err != nil { + return err + } + if b.maxBytes, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes +type FetchRequest struct { + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel + blocks map[string]map[int32]*fetchRequestBlock +} + +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = 0 + ReadCommitted IsolationLevel = 1 +) + +func (r *FetchRequest) encode(pe packetEncoder) (err error) { + pe.putInt32(-1) // replica ID is always -1 for clients + pe.putInt32(r.MaxWaitTime) + pe.putInt32(r.MinBytes) + if r.Version >= 3 { + pe.putInt32(r.MaxBytes) + } + if r.Version >= 4 { + pe.putInt8(int8(r.Isolation)) + } + err = pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, blocks := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(blocks)) + if err != nil { + return err + } + for partition, block := range blocks { + pe.putInt32(partition) + err = block.encode(pe) + if err != nil { + return err + } + } + } + return nil +} + +func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if _, err = pd.getInt32(); err != nil { + return err + } + if r.MaxWaitTime, err = pd.getInt32(); err != nil { + return err + } + if r.MinBytes, err = pd.getInt32(); err != nil { + return err + } + if r.Version >= 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + if r.Version >= 4 { + isolation, err := pd.getInt8() + if err != nil { + return err + } + r.Isolation = IsolationLevel(isolation) + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + fetchBlock := &fetchRequestBlock{} + if err = fetchBlock.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = fetchBlock + } + } + return nil +} + +func (r *FetchRequest) key() int16 { + return 1 +} + +func (r *FetchRequest) version() int16 { + return r.Version +} + +func (r *FetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4: + 
return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + } + + tmp := new(fetchRequestBlock) + tmp.maxBytes = maxBytes + tmp.fetchOffset = fetchOffset + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go new file mode 100644 index 0000000000..ae91bb9eb0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -0,0 +1,385 @@ +package sarama + +import ( + "time" +) + +type AbortedTransaction struct { + ProducerID int64 + FirstOffset int64 +} + +func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if t.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + return nil +} + +func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { + pe.putInt64(t.ProducerID) + pe.putInt64(t.FirstOffset) + + return nil +} + +type FetchResponseBlock struct { + Err KError + HighWaterMarkOffset int64 + LastStableOffset int64 + AbortedTransactions []*AbortedTransaction + Records *Records // deprecated: use FetchResponseBlock.Records + RecordsSet []*Records + Partial bool +} + +func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.HighWaterMarkOffset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 4 { + b.LastStableOffset, err = pd.getInt64() + if err != nil { + return err + } + + numTransact, err := pd.getArrayLength() + if err != nil { + return err + } + + if numTransact >= 0 { + b.AbortedTransactions = make([]*AbortedTransaction, numTransact) + } + + for i := 0; i < numTransact; i++ { + transact := new(AbortedTransaction) + if err = transact.decode(pd); err != nil { + return err + } + b.AbortedTransactions[i] = transact + } + } + + recordsSize, err := pd.getInt32() + if err != nil { + return err + } + + recordsDecoder, err := pd.getSubset(int(recordsSize)) + if err != nil { + return err + } + + b.RecordsSet = []*Records{} + + for recordsDecoder.remaining() > 0 { + records := &Records{} + if err := records.decode(recordsDecoder); err != nil { + // If we have at least one decoded records, this is not an error + if err == ErrInsufficientData { + if len(b.RecordsSet) == 0 { + b.Partial = true + } + break + } + return err + } + + partial, err := records.isPartial() + if err != nil { + return err + } + + // If we have at least one full records, we skip incomplete ones + if partial && len(b.RecordsSet) > 0 { + break + } + + b.RecordsSet = append(b.RecordsSet, records) + + if b.Records == nil { + b.Records = records + } + } + + return nil +} + +func (b *FetchResponseBlock) numRecords() (int, error) { + sum := 0 + + for _, records := range b.RecordsSet { + count, err := records.numRecords() + if err != nil { + return 0, err + } + + sum += count + } + + return sum, nil +} + +func (b *FetchResponseBlock) isPartial() (bool, error) { + if b.Partial { + return true, nil + } + + if len(b.RecordsSet) == 1 { + return b.RecordsSet[0].isPartial() + } + + return false, nil +} + +func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { + 
pe.putInt16(int16(b.Err)) + + pe.putInt64(b.HighWaterMarkOffset) + + if version >= 4 { + pe.putInt64(b.LastStableOffset) + + if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { + return err + } + for _, transact := range b.AbortedTransactions { + if err = transact.encode(pe); err != nil { + return err + } + } + } + + pe.push(&lengthField{}) + for _, records := range b.RecordsSet { + err = records.encode(pe) + if err != nil { + return err + } + } + return pe.pop() +} + +type FetchResponse struct { + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + Version int16 // v1 requires 0.9+, v2 requires 0.10+ +} + +func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.Version >= 1 { + throttle, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttle) * time.Millisecond + } + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(FetchResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *FetchResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + + err = pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + + for id, block := range partitions { + pe.putInt32(id) + err = block.encode(pe, r.Version) + if err != nil { + return err + } + } + + } + return nil +} + +func (r *FetchResponse) key() int16 { + return 1 +} + +func (r *FetchResponse) version() int16 { + return r.Version +} + +func (r *FetchResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4: + return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *FetchResponse) AddError(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + frb.Err = err +} + +func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + 
partitions[partition] = frb + } + + return frb +} + +func encodeKV(key, value Encoder) ([]byte, []byte) { + var kb []byte + var vb []byte + if key != nil { + kb, _ = key.Encode() + } + if value != nil { + vb, _ = value.Encode() + } + + return kb, vb +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + msg := &Message{Key: kb, Value: vb} + msgBlock := &MessageBlock{Msg: msg, Offset: offset} + if len(frb.RecordsSet) == 0 { + records := newLegacyRecords(&MessageSet{}) + frb.RecordsSet = []*Records{&records} + } + set := frb.RecordsSet[0].MsgSet + set.Messages = append(set.Messages, msgBlock) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + rec := &Record{Key: kb, Value: vb, OffsetDelta: offset} + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].RecordBatch + batch.addRecord(rec) +} + +func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { + frb := r.getOrCreateBlock(topic, partition) + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].RecordBatch + batch.LastOffsetDelta = offset +} + +func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + frb.LastStableOffset = offset +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go new file mode 100644 index 0000000000..0ab5cb5ff5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -0,0 +1,61 @@ +package sarama + +type CoordinatorType int8 + +const ( + CoordinatorGroup CoordinatorType = 0 + CoordinatorTransaction CoordinatorType = 1 +) + +type FindCoordinatorRequest struct { + Version int16 + CoordinatorKey string + CoordinatorType CoordinatorType +} + +func (f *FindCoordinatorRequest) encode(pe packetEncoder) error { + if err := pe.putString(f.CoordinatorKey); err != nil { + return err + } + + if f.Version >= 1 { + pe.putInt8(int8(f.CoordinatorType)) + } + + return nil +} + +func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) { + if f.CoordinatorKey, err = pd.getString(); err != nil { + return err + } + + if version >= 1 { + f.Version = version + coordinatorType, err := pd.getInt8() + if err != nil { + return err + } + + f.CoordinatorType = CoordinatorType(coordinatorType) + } + + return nil +} + +func (f *FindCoordinatorRequest) key() int16 { + return 10 +} + +func (f *FindCoordinatorRequest) version() int16 { + return f.Version +} + +func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go new file mode 100644 index 0000000000..9c900e8b77 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go @@ -0,0 +1,92 @@ +package sarama + +import ( + "time" +) + +var NoNode = &Broker{id: -1, addr: ":-1"} + +type FindCoordinatorResponse struct { + Version 
int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + Coordinator *Broker +} + +func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + f.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(tmp) + + if version >= 1 { + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + coordinator := new(Broker) + // The version is hardcoded to 0, as version 1 of the Broker-decode + // contains the rack-field which is not present in the FindCoordinatorResponse. + if err := coordinator.decode(pd, 0); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + f.Coordinator = coordinator + + return nil +} + +func (f *FindCoordinatorResponse) encode(pe packetEncoder) error { + if f.Version >= 1 { + pe.putInt32(int32(f.ThrottleTime / time.Millisecond)) + } + + pe.putInt16(int16(f.Err)) + + if f.Version >= 1 { + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + } + + coordinator := f.Coordinator + if coordinator == nil { + coordinator = NoNode + } + if err := coordinator.encode(pe, 0); err != nil { + return err + } + return nil +} + +func (f *FindCoordinatorResponse) key() int16 { + return 10 +} + +func (f *FindCoordinatorResponse) version() int16 { + return f.Version +} + +func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go new file mode 100644 index 0000000000..ce49c47397 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -0,0 +1,47 @@ +package sarama + +type HeartbeatRequest struct { + GroupId string + GenerationId int32 + MemberId string +} + +func (r *HeartbeatRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *HeartbeatRequest) key() int16 { + return 12 +} + +func (r *HeartbeatRequest) version() int16 { + return 0 +} + +func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go new file mode 100644 index 0000000000..766f5fdec6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -0,0 +1,32 @@ +package sarama + +type HeartbeatResponse struct { + Err KError +} + +func (r *HeartbeatResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *HeartbeatResponse) key() int16 { + return 12 +} + +func (r *HeartbeatResponse) version() int16 { + return 0 +} + +func (r 
*HeartbeatResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go new file mode 100644 index 0000000000..8ceb6c2325 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -0,0 +1,43 @@ +package sarama + +import "time" + +type InitProducerIDRequest struct { + TransactionalID *string + TransactionTimeout time.Duration +} + +func (i *InitProducerIDRequest) encode(pe packetEncoder) error { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + + return nil +} + +func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { + if i.TransactionalID, err = pd.getNullableString(); err != nil { + return err + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + i.TransactionTimeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (i *InitProducerIDRequest) key() int16 { + return 22 +} + +func (i *InitProducerIDRequest) version() int16 { + return 0 +} + +func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go new file mode 100644 index 0000000000..1b32eb085b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -0,0 +1,55 @@ +package sarama + +import "time" + +type InitProducerIDResponse struct { + ThrottleTime time.Duration + Err KError + ProducerID int64 + ProducerEpoch int16 +} + +func (i *InitProducerIDResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(i.Err)) + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + + return nil +} + +func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + i.Err = KError(kerr) + + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +func (i *InitProducerIDResponse) key() int16 { + return 22 +} + +func (i *InitProducerIDResponse) version() int16 { + return 0 +} + +func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go new file mode 100644 index 0000000000..97e9299ea1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -0,0 +1,163 @@ +package sarama + +type GroupProtocol struct { + Name string + Metadata []byte +} + +func (p *GroupProtocol) decode(pd packetDecoder) (err error) { + p.Name, err = pd.getString() + if err != nil { + return err + } + p.Metadata, err = pd.getBytes() + return err +} + +func (p *GroupProtocol) encode(pe packetEncoder) (err error) { + if err := pe.putString(p.Name); err != nil { + return err + } + if err := pe.putBytes(p.Metadata); err != nil { + return err + } + return nil +} + +type JoinGroupRequest struct { + Version int16 + GroupId string + SessionTimeout int32 + RebalanceTimeout int32 + MemberId string + ProtocolType 
string + GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + OrderedGroupProtocols []*GroupProtocol +} + +func (r *JoinGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + pe.putInt32(r.SessionTimeout) + if r.Version >= 1 { + pe.putInt32(r.RebalanceTimeout) + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + if err := pe.putString(r.ProtocolType); err != nil { + return err + } + + if len(r.GroupProtocols) > 0 { + if len(r.OrderedGroupProtocols) > 0 { + return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { + return err + } + for name, metadata := range r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + } else { + if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { + return err + } + for _, protocol := range r.OrderedGroupProtocols { + if err := protocol.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.GroupId, err = pd.getString(); err != nil { + return + } + + if r.SessionTimeout, err = pd.getInt32(); err != nil { + return + } + + if version >= 1 { + if r.RebalanceTimeout, err = pd.getInt32(); err != nil { + return err + } + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + if r.ProtocolType, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupProtocols = make(map[string][]byte) + for i := 0; i < n; i++ { + protocol := &GroupProtocol{} + if err := protocol.decode(pd); err != nil { + return err + } + r.GroupProtocols[protocol.Name] = protocol.Metadata + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) + } + + return nil +} + +func (r *JoinGroupRequest) key() int16 { + return 11 +} + +func (r *JoinGroupRequest) version() int16 { + return r.Version +} + +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } +} + +func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ + Name: name, + Metadata: metadata, + }) +} + +func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { + bin, err := encode(metadata, nil) + if err != nil { + return err + } + + r.AddGroupProtocol(name, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go new file mode 100644 index 0000000000..5752acc8ae --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -0,0 +1,135 @@ +package sarama + +type JoinGroupResponse struct { + Version int16 + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { + members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) + for id, bin := range r.Members { + meta := 
new(ConsumerGroupMemberMetadata) + if err := decode(bin, meta); err != nil { + return nil, err + } + members[id] = *meta + } + return members, nil +} + +func (r *JoinGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 2 { + pe.putInt32(r.ThrottleTime) + } + pe.putInt16(int16(r.Err)) + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.GroupProtocol); err != nil { + return err + } + if err := pe.putString(r.LeaderId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + + for memberId, memberMetadata := range r.Members { + if err := pe.putString(memberId); err != nil { + return err + } + + if err := pe.putBytes(memberMetadata); err != nil { + return err + } + } + + return nil +} + +func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 2 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return + } + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + + if r.GroupProtocol, err = pd.getString(); err != nil { + return + } + + if r.LeaderId, err = pd.getString(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Members = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + memberMetadata, err := pd.getBytes() + if err != nil { + return err + } + + r.Members[memberId] = memberMetadata + } + + return nil +} + +func (r *JoinGroupResponse) key() int16 { + return 11 +} + +func (r *JoinGroupResponse) version() int16 { + return r.Version +} + +func (r *JoinGroupResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go new file mode 100644 index 0000000000..e177427482 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -0,0 +1,40 @@ +package sarama + +type LeaveGroupRequest struct { + GroupId string + MemberId string +} + +func (r *LeaveGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *LeaveGroupRequest) key() int16 { + return 13 +} + +func (r *LeaveGroupRequest) version() int16 { + return 0 +} + +func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go new file mode 100644 index 0000000000..d60c626da0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -0,0 +1,32 @@ +package sarama + +type LeaveGroupResponse struct { + Err KError +} + +func (r *LeaveGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + 
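+// decode reads the single big-endian int16 error code that forms the entire
+// version-0 LeaveGroup response body.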
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *LeaveGroupResponse) key() int16 { + return 13 +} + +func (r *LeaveGroupResponse) version() int16 { + return 0 +} + +func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go new file mode 100644 index 0000000000..576b1a6f6f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -0,0 +1,69 @@ +package sarama + +import "encoding/binary" + +// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. +type lengthField struct { + startOffset int +} + +func (l *lengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *lengthField) reserveLength() int { + return 4 +} + +func (l *lengthField) run(curOffset int, buf []byte) error { + binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) + return nil +} + +func (l *lengthField) check(curOffset int, buf []byte) error { + if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { + return PacketDecodingError{"length field invalid"} + } + + return nil +} + +type varintLengthField struct { + startOffset int + length int64 +} + +func (l *varintLengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getVarint() + return err +} + +func (l *varintLengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *varintLengthField) adjustLength(currOffset int) int { + oldFieldSize := l.reserveLength() + l.length = int64(currOffset - l.startOffset - oldFieldSize) + + return l.reserveLength() - oldFieldSize +} + +func (l *varintLengthField) reserveLength() int { + var tmp [binary.MaxVarintLen64]byte + return binary.PutVarint(tmp[:], l.length) +} + +func (l *varintLengthField) run(curOffset int, buf []byte) error { + binary.PutVarint(buf[l.startOffset:], l.length) + return nil +} + +func (l *varintLengthField) check(curOffset int, buf []byte) error { + if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go new file mode 100644 index 0000000000..3b16abf7fa --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -0,0 +1,24 @@ +package sarama + +type ListGroupsRequest struct { +} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return 0 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go new file mode 100644 index 0000000000..56115d4c75 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -0,0 +1,69 @@ +package sarama + +type ListGroupsResponse struct { + Err KError + Groups map[string]string +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + + if err := 
pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+	for groupId, protocolType := range r.Groups {
+		if err := pe.putString(groupId); err != nil {
+			return err
+		}
+		if err := pe.putString(protocolType); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.Groups = make(map[string]string)
+	for i := 0; i < n; i++ {
+		groupId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		protocolType, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		r.Groups[groupId] = protocolType
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+	return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 0000000000..fecdbfdef7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,223 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"time"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+// only the last two bits are really used
+const compressionCodecMask int8 = 0x03
+
+const (
+	CompressionNone   CompressionCodec = 0
+	CompressionGZIP   CompressionCodec = 1
+	CompressionSnappy CompressionCodec = 2
+	CompressionLZ4    CompressionCodec = 3
+)
+
+func (cc CompressionCodec) String() string {
+	return []string{
+		"none",
+		"gzip",
+		"snappy",
+		"lz4",
+	}[int(cc)]
+}
+
+// CompressionLevelDefault is the constant to use in CompressionLevel
+// to have the default compression level for any codec. The value is picked
+// so that it does not collide with any valid compression level.
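For context on how `compressionCodecMask` is used by `Message.encode` and `Message.decode` further down: the codec ID occupies the low two bits of the message's attributes byte, and any higher bits (such as the v1 timestamp-type flag) must be masked off on read. A self-contained sketch, independent of sarama's internals:

```go
package main

import "fmt"

const compressionCodecMask int8 = 0x03 // only the last two bits carry the codec

func codecName(cc int8) string {
	return []string{"none", "gzip", "snappy", "lz4"}[cc]
}

func main() {
	// encode side: fold the codec ID into the attributes byte
	codec := int8(2) // 2 = snappy in the codec table above
	attributes := codec & compressionCodecMask

	// decode side: recover the codec from a received attributes byte,
	// ignoring any unrelated higher bits a newer broker may have set
	received := attributes | 0x08 // pretend some other flag bit is set
	fmt.Println(codecName(received & compressionCodecMask)) // prints "snappy"
}
```

The `CompressionLevelDefault` constant described by the comment above follows.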
+const CompressionLevelDefault = -1000 + +type Message struct { + Codec CompressionCodec // codec used to compress the message contents + CompressionLevel int // compression level + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) + + compressedCache []byte + compressedSize int // used for computing the compression ratio metrics +} + +func (m *Message) encode(pe packetEncoder) error { + pe.push(newCRC32Field(crcIEEE)) + + pe.putInt8(m.Version) + + attributes := int8(m.Codec) & compressionCodecMask + pe.putInt8(attributes) + + if m.Version >= 1 { + if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { + return err + } + } + + err := pe.putBytes(m.Key) + if err != nil { + return err + } + + var payload []byte + + if m.compressedCache != nil { + payload = m.compressedCache + m.compressedCache = nil + } else if m.Value != nil { + switch m.Codec { + case CompressionNone: + payload = m.Value + case CompressionGZIP: + var buf bytes.Buffer + var writer *gzip.Writer + if m.CompressionLevel != CompressionLevelDefault { + writer, err = gzip.NewWriterLevel(&buf, m.CompressionLevel) + if err != nil { + return err + } + } else { + writer = gzip.NewWriter(&buf) + } + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + case CompressionSnappy: + tmp := snappy.Encode(m.Value) + m.compressedCache = tmp + payload = m.compressedCache + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + } + // Keep in mind the compressed payload size for metric gathering + m.compressedSize = len(payload) + } + + if err = pe.putBytes(payload); err != nil { + return err + } + + return pe.pop() +} + +func (m *Message) decode(pd packetDecoder) (err error) { + err = pd.push(newCRC32Field(crcIEEE)) + if err != nil { + return err + } + + m.Version, err = pd.getInt8() + if err != nil { + return err + } + + if m.Version > 1 { + return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} + } + + attribute, err := pd.getInt8() + if err != nil { + return err + } + m.Codec = CompressionCodec(attribute & compressionCodecMask) + + if m.Version == 1 { + if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { + return err + } + } + + m.Key, err = pd.getBytes() + if err != nil { + return err + } + + m.Value, err = pd.getBytes() + if err != nil { + return err + } + + // Required for deep equal assertion during tests but might be useful + // for future metrics about the compression ratio in fetch requests + m.compressedSize = len(m.Value) + + switch m.Codec { + case CompressionNone: + // nothing to do + case CompressionGZIP: + if m.Value == nil { + break + } + reader, err := gzip.NewReader(bytes.NewReader(m.Value)) + if err != nil { + return err + } + if m.Value, err = ioutil.ReadAll(reader); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + case CompressionSnappy: + if m.Value == nil { + break + } + if m.Value, err = 
snappy.Decode(m.Value); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	case CompressionLZ4:
+		if m.Value == nil {
+			break
+		}
+		reader := lz4.NewReader(bytes.NewReader(m.Value))
+		if m.Value, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+
+	default:
+		return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
+	}
+
+	return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+	pd := realDecoder{raw: m.Value}
+	m.Set = &MessageSet{}
+	return m.Set.decode(&pd)
+}
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 0000000000..27db52fdf1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,102 @@
+package sarama
+
+type MessageBlock struct {
+	Offset int64
+	Msg    *Message
+}
+
+// Messages is a convenience helper which returns either all the
+// messages that are wrapped in this block
+func (msb *MessageBlock) Messages() []*MessageBlock {
+	if msb.Msg.Set != nil {
+		return msb.Msg.Set.Messages
+	}
+	return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+	pe.putInt64(msb.Offset)
+	pe.push(&lengthField{})
+	err := msb.Msg.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+	if msb.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&lengthField{}); err != nil {
+		return err
+	}
+
+	msb.Msg = new(Message)
+	if err = msb.Msg.decode(pd); err != nil {
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type MessageSet struct {
+	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+	Messages               []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+	for i := range ms.Messages {
+		err := ms.Messages[i].encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+	ms.Messages = nil
+
+	for pd.remaining() > 0 {
+		magic, err := magicValue(pd)
+		if err != nil {
+			if err == ErrInsufficientData {
+				ms.PartialTrailingMessage = true
+				return nil
+			}
+			return err
+		}
+
+		if magic > 1 {
+			return nil
+		}
+
+		msb := new(MessageBlock)
+		err = msb.decode(pd)
+		switch err {
+		case nil:
+			ms.Messages = append(ms.Messages, msb)
+		case ErrInsufficientData:
+			// As an optimization the server is allowed to return a partial message at the
+			// end of the message set. Clients should handle this case. So we just ignore such things.
+ ms.PartialTrailingMessage = true + return nil + default: + return err + } + } + + return nil +} + +func (ms *MessageSet) addMessage(msg *Message) { + block := new(MessageBlock) + block.Msg = msg + ms.Messages = append(ms.Messages, block) +} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go new file mode 100644 index 0000000000..48adfa28cb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -0,0 +1,88 @@ +package sarama + +type MetadataRequest struct { + Version int16 + Topics []string + AllowAutoTopicCreation bool +} + +func (r *MetadataRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 5 { + return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} + } + if r.Version == 0 || r.Topics != nil || len(r.Topics) > 0 { + err := pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + } else { + pe.putInt32(-1) + } + if r.Version > 3 { + pe.putBool(r.AllowAutoTopicCreation) + } + return nil +} + +func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + size, err := pd.getInt32() + if err != nil { + return err + } + if size < 0 { + return nil + } else { + topicCount := size + if topicCount == 0 { + return nil + } + + r.Topics = make([]string, topicCount) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } + } + if r.Version > 3 { + autoCreation, err := pd.getBool() + if err != nil { + return err + } + r.AllowAutoTopicCreation = autoCreation + } + return nil +} + +func (r *MetadataRequest) key() int16 { + return 3 +} + +func (r *MetadataRequest) version() int16 { + return r.Version +} + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } +} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go new file mode 100644 index 0000000000..bf8a67bbc5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -0,0 +1,310 @@ +package sarama + +type PartitionMetadata struct { + Err KError + ID int32 + Leader int32 + Replicas []int32 + Isr []int32 + OfflineReplicas []int32 +} + +func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pm.Err = KError(tmp) + + pm.ID, err = pd.getInt32() + if err != nil { + return err + } + + pm.Leader, err = pd.getInt32() + if err != nil { + return err + } + + pm.Replicas, err = pd.getInt32Array() + if err != nil { + return err + } + + pm.Isr, err = pd.getInt32Array() + if err != nil { + return err + } + + if version >= 5 { + pm.OfflineReplicas, err = pd.getInt32Array() + if err != nil { + return err + } + } + + return nil +} + +func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(pm.Err)) + pe.putInt32(pm.ID) + pe.putInt32(pm.Leader) + + err = pe.putInt32Array(pm.Replicas) + if err != nil { + return err + } + + err = pe.putInt32Array(pm.Isr) + if err != nil { + return err + } + + if version >= 5 { + err = pe.putInt32Array(pm.OfflineReplicas) + if err != nil { + return err + } + } + + return nil +} + +type 
TopicMetadata struct { + Err KError + Name string + IsInternal bool // Only valid for Version >= 1 + Partitions []*PartitionMetadata +} + +func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + tm.Err = KError(tmp) + + tm.Name, err = pd.getString() + if err != nil { + return err + } + + if version >= 1 { + tm.IsInternal, err = pd.getBool() + if err != nil { + return err + } + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + tm.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { + tm.Partitions[i] = new(PartitionMetadata) + err = tm.Partitions[i].decode(pd, version) + if err != nil { + return err + } + } + + return nil +} + +func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(tm.Err)) + + err = pe.putString(tm.Name) + if err != nil { + return err + } + + if version >= 1 { + pe.putBool(tm.IsInternal) + } + + err = pe.putArrayLength(len(tm.Partitions)) + if err != nil { + return err + } + + for _, pm := range tm.Partitions { + err = pm.encode(pe, version) + if err != nil { + return err + } + } + + return nil +} + +type MetadataResponse struct { + Version int16 + ThrottleTimeMs int32 + Brokers []*Broker + ClusterID *string + ControllerID int32 + Topics []*TopicMetadata +} + +func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Brokers = make([]*Broker, n) + for i := 0; i < n; i++ { + r.Brokers[i] = new(Broker) + err = r.Brokers[i].decode(pd, version) + if err != nil { + return err + } + } + + if version >= 2 { + r.ClusterID, err = pd.getNullableString() + if err != nil { + return err + } + } + + if version >= 1 { + r.ControllerID, err = pd.getInt32() + if err != nil { + return err + } + } else { + r.ControllerID = -1 + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]*TopicMetadata, n) + for i := 0; i < n; i++ { + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd, version) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Brokers)) + if err != nil { + return err + } + for _, broker := range r.Brokers { + err = broker.encode(pe, r.Version) + if err != nil { + return err + } + } + + if r.Version >= 1 { + pe.putInt32(r.ControllerID) + } + + err = pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + for _, tm := range r.Topics { + err = tm.encode(pe, r.Version) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) key() int16 { + return 3 +} + +func (r *MetadataResponse) version() int16 { + return r.Version +} + +func (r *MetadataResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } +} + +// testing API + +func (r *MetadataResponse) AddBroker(addr string, id int32) { + r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) +} + +func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { + var tmatch *TopicMetadata + + for _, tm := range r.Topics { + if tm.Name == topic { + tmatch = tm + goto foundTopic + } + } + 
+ tmatch = new(TopicMetadata) + tmatch.Name = topic + r.Topics = append(r.Topics, tmatch) + +foundTopic: + + tmatch.Err = err + return tmatch +} + +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { + tmatch := r.AddTopic(topic, ErrNoError) + var pmatch *PartitionMetadata + + for _, pm := range tmatch.Partitions { + if pm.ID == partition { + pmatch = pm + goto foundPartition + } + } + + pmatch = new(PartitionMetadata) + pmatch.ID = partition + tmatch.Partitions = append(tmatch.Partitions, pmatch) + +foundPartition: + + pmatch.Leader = brokerID + pmatch.Replicas = replicas + pmatch.Isr = isr + pmatch.Err = err + +} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go new file mode 100644 index 0000000000..4869708e94 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -0,0 +1,51 @@ +package sarama + +import ( + "fmt" + "strings" + + "github.com/rcrowley/go-metrics" +) + +// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: +// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, +// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. +// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38 +const ( + metricsReservoirSize = 1028 + metricsAlphaFactor = 0.015 +) + +func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram { + return r.GetOrRegister(name, func() metrics.Histogram { + return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor)) + }).(metrics.Histogram) +} + +func getMetricNameForBroker(name string, broker *Broker) string { + // Use broker id like the Java client as it does not contain '.' or ':' characters that + // can be interpreted as special character by monitoring tool (e.g. Graphite) + return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) +} + +func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter { + return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r) +} + +func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { + return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) +} + +func getMetricNameForTopic(name string, topic string) string { + // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy + // cf. 
KAFKA-1902 and KAFKA-2337
+	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 0000000000..55ef1e2920
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,330 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and makes it the
+// test writer's responsibility to program MockBroker behaviour that is correct
+// according to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from that connection and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes, the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID     int32
+	port         int32
+	closing      chan none
+	stopper      chan none
+	expectations chan encoder
+	listener     net.Listener
+	t            TestReporter
+	latency      time.Duration
+	handler      requestHandlerFunc
+	notifier     RequestNotifierFunc
+	history      []RequestResponse
+	lock         sync.Mutex
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	b.setHandler(func(req *request) (res encoder) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := handlerMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has been
+// processed successfully and will provide the number of bytes read and written
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
:". +func (b *MockBroker) Addr() string { + return b.listener.Addr().String() +} + +// Close terminates the broker blocking until it stops internal goroutines and +// releases all resources. +func (b *MockBroker) Close() { + close(b.expectations) + if len(b.expectations) > 0 { + buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID())) + for e := range b.expectations { + _, _ = buf.WriteString(spew.Sdump(e)) + } + b.t.Error(buf.String()) + } + close(b.closing) + <-b.stopper +} + +// setHandler sets the specified function as the request handler. Whenever +// a mock broker reads a request from the wire it passes the request to the +// function and sends back whatever the handler function returns. +func (b *MockBroker) setHandler(handler requestHandlerFunc) { + b.lock.Lock() + b.handler = handler + b.lock.Unlock() +} + +func (b *MockBroker) serverLoop() { + defer close(b.stopper) + var err error + var conn net.Conn + + go func() { + <-b.closing + err := b.listener.Close() + if err != nil { + b.t.Error(err) + } + }() + + wg := &sync.WaitGroup{} + i := 0 + for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { + wg.Add(1) + go b.handleRequests(conn, i, wg) + i++ + } + wg.Wait() + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) +} + +func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { + defer wg.Done() + defer func() { + _ = conn.Close() + }() + Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) + var err error + + abort := make(chan none) + defer close(abort) + go func() { + select { + case <-b.closing: + _ = conn.Close() + case <-abort: + } + }() + + resHeader := make([]byte, 8) + for { + req, bytesRead, err := decodeRequest(conn) + if err != nil { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + break + } + + if b.latency > 0 { + time.Sleep(b.latency) + } + + b.lock.Lock() + res := b.handler(req) + b.history = append(b.history, RequestResponse{req.body, res}) + b.lock.Unlock() + + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) + continue + } + Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + + encodedRes, err := encode(res, nil) + if err != nil { + b.serverError(err) + break + } + if len(encodedRes) == 0 { + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, 0) + } + b.lock.Unlock() + continue + } + + binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) + binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) + if _, err = conn.Write(resHeader); err != nil { + b.serverError(err) + break + } + if _, err = conn.Write(encodedRes); err != nil { + b.serverError(err) + break + } + + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, len(resHeader)+len(encodedRes)) + } + b.lock.Unlock() + } + Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { + select { + case res, ok := <-b.expectations: + if !ok { + return nil + } + return res + case <-time.After(expectationTimeout): + return nil + } +} + +func (b *MockBroker) serverError(err error) { + isConnectionClosedError := false + if _, ok := err.(*net.OpError); ok { + isConnectionClosedError = true + } else if err == io.EOF { + 
+		isConnectionClosedError = true
+	} else if err.Error() == "use of closed network connection" {
+		isConnectionClosedError = true
+	}
+
+	if isConnectionClosedError {
+		return
+	}
+
+	b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a channel of responses to use. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like newMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+	var err error
+
+	broker := &MockBroker{
+		closing:      make(chan none),
+		stopper:      make(chan none),
+		t:            t,
+		brokerID:     brokerID,
+		expectations: make(chan encoder, 512),
+		listener:     listener,
+	}
+	broker.handler = broker.defaultRequestHandler
+
+	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := strconv.ParseInt(portStr, 10, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	broker.port = int32(tmp)
+
+	go broker.serverLoop()
+
+	return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+	b.expectations <- e
+}
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 0000000000..5541d32ec6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,540 @@
+package sarama
+
+import (
+	"fmt"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface; it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program the behavior of MockBroker in tests.
+type MockResponse interface {
+	For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+	res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+	return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+	return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Each time a `MockBroker` calls its `For` method,
+// the next response from the sequence is returned. When the end of the
+// sequence is reached, the last element of the sequence is returned.
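Before the `MockSequence` type itself (defined just below), a sketch of how such a sequence is typically wired into a `MockBroker` via `SetHandlerByMap`. This is test scaffolding only; `t` is the test's `*testing.T` and `"my-topic"` is a made-up name:

```go
// inside a test function; only APIs defined in this patch are used
broker := sarama.NewMockBroker(t, 1)
defer broker.Close()

broker.SetHandlerByMap(map[string]sarama.MockResponse{
	"MetadataRequest": sarama.NewMockSequence(
		// first metadata request: only the broker itself is known
		sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()),
		// every later request: the topic has a leader as well
		sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my-topic", 0, broker.BrokerID()),
	),
})
```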
+type MockSequence struct { + responses []MockResponse +} + +func NewMockSequence(responses ...interface{}) *MockSequence { + ms := &MockSequence{} + ms.responses = make([]MockResponse, len(responses)) + for i, res := range responses { + switch res := res.(type) { + case MockResponse: + ms.responses[i] = res + case encoder: + ms.responses[i] = NewMockWrapper(res) + default: + panic(fmt.Sprintf("Unexpected response type: %T", res)) + } + } + return ms +} + +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { + res = mc.responses[0].For(reqBody) + if len(mc.responses) > 1 { + mc.responses = mc.responses[1:] + } + return res +} + +// MockMetadataResponse is a `MetadataResponse` builder. +type MockMetadataResponse struct { + controllerID int32 + leaders map[string]map[int32]int32 + brokers map[string]int32 + t TestReporter +} + +func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { + return &MockMetadataResponse{ + leaders: make(map[string]map[int32]int32), + brokers: make(map[string]int32), + t: t, + } +} + +func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { + partitions := mmr.leaders[topic] + if partitions == nil { + partitions = make(map[int32]int32) + mmr.leaders[topic] = partitions + } + partitions[partition] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { + mmr.brokers[addr] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse { + mmr.controllerID = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { + metadataRequest := reqBody.(*MetadataRequest) + metadataResponse := &MetadataResponse{ + Version: metadataRequest.version(), + ControllerID: mmr.controllerID, + } + for addr, brokerID := range mmr.brokers { + metadataResponse.AddBroker(addr, brokerID) + } + if len(metadataRequest.Topics) == 0 { + for topic, partitions := range mmr.leaders { + for partition, brokerID := range partitions { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse + } + for _, topic := range metadataRequest.Topics { + for partition, brokerID := range mmr.leaders[topic] { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse +} + +// MockOffsetResponse is an `OffsetResponse` builder. 
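A usage sketch for the offset builder that the comment above introduces (its definition follows). `OffsetOldest` and `OffsetNewest` are sarama's standard time sentinels, and the topic name is made up:

```go
// continuing the test from the MockSequence sketch above
broker.SetHandlerByMap(map[string]sarama.MockResponse{
	"OffsetRequest": sarama.NewMockOffsetResponse(t).
		SetOffset("my-topic", 0, sarama.OffsetOldest, 0).    // earliest available offset
		SetOffset("my-topic", 0, sarama.OffsetNewest, 2345), // next offset to be produced
})
```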
+type MockOffsetResponse struct { + offsets map[string]map[int32]map[int64]int64 + t TestReporter + version int16 +} + +func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { + return &MockOffsetResponse{ + offsets: make(map[string]map[int32]map[int64]int64), + t: t, + } +} + +func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { + mor.version = version + return mor +} + +func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { + partitions := mor.offsets[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]int64) + mor.offsets[topic] = partitions + } + times := partitions[partition] + if times == nil { + times = make(map[int64]int64) + partitions[partition] = times + } + times[time] = offset + return mor +} + +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { + offsetRequest := reqBody.(*OffsetRequest) + offsetResponse := &OffsetResponse{Version: mor.version} + for topic, partitions := range offsetRequest.blocks { + for partition, block := range partitions { + offset := mor.getOffset(topic, partition, block.time) + offsetResponse.AddTopicPartition(topic, partition, offset) + } + } + return offsetResponse +} + +func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { + partitions := mor.offsets[topic] + if partitions == nil { + mor.t.Errorf("missing topic: %s", topic) + } + times := partitions[partition] + if times == nil { + mor.t.Errorf("missing partition: %d", partition) + } + offset, ok := times[time] + if !ok { + mor.t.Errorf("missing time: %d", time) + } + return offset +} + +// MockFetchResponse is a `FetchResponse` builder. +type MockFetchResponse struct { + messages map[string]map[int32]map[int64]Encoder + highWaterMarks map[string]map[int32]int64 + t TestReporter + batchSize int + version int16 +} + +func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { + return &MockFetchResponse{ + messages: make(map[string]map[int32]map[int64]Encoder), + highWaterMarks: make(map[string]map[int32]int64), + t: t, + batchSize: batchSize, + } +} + +func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { + mfr.version = version + return mfr +} + +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + partitions := mfr.messages[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]Encoder) + mfr.messages[topic] = partitions + } + messages := partitions[partition] + if messages == nil { + messages = make(map[int64]Encoder) + partitions[partition] = messages + } + messages[offset] = msg + return mfr +} + +func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + partitions = make(map[int32]int64) + mfr.highWaterMarks[topic] = partitions + } + partitions[partition] = offset + return mfr +} + +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { + fetchRequest := reqBody.(*FetchRequest) + res := &FetchResponse{ + Version: mfr.version, + } + for topic, partitions := range fetchRequest.blocks { + for partition, block := range partitions { + initialOffset := block.fetchOffset + offset := initialOffset + maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) + for i := 0; i < mfr.batchSize && offset < maxOffset; { + msg := mfr.getMessage(topic, partition, 
offset) + if msg != nil { + res.AddMessage(topic, partition, nil, msg, offset) + i++ + } + offset++ + } + fb := res.GetBlock(topic, partition) + if fb == nil { + res.AddError(topic, partition, ErrNoError) + fb = res.GetBlock(topic, partition) + } + fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) + } + } + return res +} + +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { + partitions := mfr.messages[topic] + if partitions == nil { + return nil + } + messages := partitions[partition] + if messages == nil { + return nil + } + return messages[offset] +} + +func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + partitions := mfr.messages[topic] + if partitions == nil { + return 0 + } + messages := partitions[partition] + if messages == nil { + return 0 + } + return len(messages) +} + +func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + return 0 + } + return partitions[partition] +} + +// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. +type MockConsumerMetadataResponse struct { + coordinators map[string]interface{} + t TestReporter +} + +func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { + return &MockConsumerMetadataResponse{ + coordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { + mr.coordinators[group] = broker + return mr +} + +func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { + mr.coordinators[group] = kerror + return mr +} + +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ConsumerMetadataRequest) + group := req.ConsumerGroup + res := &ConsumerMetadataResponse{} + v := mr.coordinators[group] + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder. 
+type MockFindCoordinatorResponse struct { + groupCoordinators map[string]interface{} + transCoordinators map[string]interface{} + t TestReporter +} + +func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse { + return &MockFindCoordinatorResponse{ + groupCoordinators: make(map[string]interface{}), + transCoordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = broker + case CoordinatorTransaction: + mr.transCoordinators[group] = broker + } + return mr +} + +func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = kerror + case CoordinatorTransaction: + mr.transCoordinators[group] = kerror + } + return mr +} + +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*FindCoordinatorRequest) + res := &FindCoordinatorResponse{} + var v interface{} + switch req.CoordinatorType { + case CoordinatorGroup: + v = mr.groupCoordinators[req.CoordinatorKey] + case CoordinatorTransaction: + v = mr.transCoordinators[req.CoordinatorKey] + } + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. +type MockOffsetCommitResponse struct { + errors map[string]map[string]map[int32]KError + t TestReporter +} + +func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { + return &MockOffsetCommitResponse{t: t} +} + +func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[string]map[int32]KError) + } + topics := mr.errors[group] + if topics == nil { + topics = make(map[string]map[int32]KError) + mr.errors[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]KError) + topics[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetCommitRequest) + group := req.ConsumerGroup + res := &OffsetCommitResponse{} + for topic, partitions := range req.blocks { + for partition := range partitions { + res.AddError(topic, partition, mr.getError(group, topic, partition)) + } + } + return res +} + +func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { + topics := mr.errors[group] + if topics == nil { + return ErrNoError + } + partitions := topics[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockProduceResponse is a `ProduceResponse` builder. 
+type MockProduceResponse struct { + version int16 + errors map[string]map[int32]KError + t TestReporter +} + +func NewMockProduceResponse(t TestReporter) *MockProduceResponse { + return &MockProduceResponse{t: t} +} + +func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { + mr.version = version + return mr +} + +func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[int32]KError) + } + partitions := mr.errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + mr.errors[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ProduceRequest) + res := &ProduceResponse{ + Version: mr.version, + } + for topic, partitions := range req.records { + for partition := range partitions { + res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) + } + } + return res +} + +func (mr *MockProduceResponse) getError(topic string, partition int32) KError { + partitions := mr.errors[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. +type MockOffsetFetchResponse struct { + offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + t TestReporter +} + +func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { + return &MockOffsetFetchResponse{t: t} +} + +func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { + if mr.offsets == nil { + mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) + } + topics := mr.offsets[group] + if topics == nil { + topics = make(map[string]map[int32]*OffsetFetchResponseBlock) + mr.offsets[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + topics[topic] = partitions + } + partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} + return mr +} + +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetFetchRequest) + group := req.ConsumerGroup + res := &OffsetFetchResponse{} + for topic, partitions := range mr.offsets[group] { + for partition, block := range partitions { + res.AddBlock(topic, partition, block) + } + } + return res +} diff --git a/vendor/github.com/Shopify/sarama/mocks/README.md b/vendor/github.com/Shopify/sarama/mocks/README.md new file mode 100644 index 0000000000..55a6c2e61c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mocks/README.md @@ -0,0 +1,13 @@ +# sarama/mocks + +The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. +You can use them to test your sarama applications using dependency injection. + +The following mock objects are available: + +- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks. +- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer) +- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer) + +The mocks allow you to set expectations on them. 
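For example, a sketch of a test against the AsyncProducer mock; only APIs from this package plus sarama's public `Config` and `StringEncoder` are used, and the topic name is made up:

```go
config := sarama.NewConfig()
config.Producer.Return.Successes = true

mp := mocks.NewAsyncProducer(t, config)
mp.ExpectInputAndSucceed()

mp.Input() <- &sarama.ProducerMessage{
	Topic: "my-topic",
	Value: sarama.StringEncoder("hello"),
}
msg := <-mp.Successes() // the mock assigns an offset and acks here
_ = msg

if err := mp.Close(); err != nil { // Close verifies all expectations were used
	t.Error(err)
}
```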
When you close the mocks, the expectations will be verified,
+and the results will be reported to the `*testing.T` object you provided when creating the mock.
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
new file mode 100644
index 0000000000..24ae5c0d58
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
@@ -0,0 +1,174 @@
+package mocks
+
+import (
+	"sync"
+
+	"github.com/Shopify/sarama"
+)
+
+// AsyncProducer implements sarama's Producer interface for testing purposes.
+// Before you can send messages to its Input channel, you have to set expectations
+// so it knows how to handle the input; it returns an error if the number of messages
+// received is bigger than the number of expectations set. You can also set a
+// function in each expectation so that the message value is checked by this function
+// and an error is returned if the match fails.
+type AsyncProducer struct {
+	l            sync.Mutex
+	t            ErrorReporter
+	expectations []*producerExpectation
+	closed       chan struct{}
+	input        chan *sarama.ProducerMessage
+	successes    chan *sarama.ProducerMessage
+	errors       chan *sarama.ProducerError
+	lastOffset   int64
+}
+
+// NewAsyncProducer instantiates a new Producer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is used to determine whether it
+// should ack successes on the Successes channel.
+func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
+	if config == nil {
+		config = sarama.NewConfig()
+	}
+	mp := &AsyncProducer{
+		t:            t,
+		closed:       make(chan struct{}, 0),
+		expectations: make([]*producerExpectation, 0),
+		input:        make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+		successes:    make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+		errors:       make(chan *sarama.ProducerError, config.ChannelBufferSize),
+	}
+
+	go func() {
+		defer func() {
+			close(mp.successes)
+			close(mp.errors)
+		}()
+
+		for msg := range mp.input {
+			mp.l.Lock()
+			if mp.expectations == nil || len(mp.expectations) == 0 {
+				mp.expectations = nil
+				mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+			} else {
+				expectation := mp.expectations[0]
+				mp.expectations = mp.expectations[1:]
+				if expectation.CheckFunction != nil {
+					if val, err := msg.Value.Encode(); err != nil {
+						mp.t.Errorf("Input message encoding failed: %s", err.Error())
+						mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
+					} else {
+						err = expectation.CheckFunction(val)
+						if err != nil {
+							mp.t.Errorf("Check function returned an error: %s", err.Error())
+							mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
+						}
+					}
+				}
+				if expectation.Result == errProduceSuccess {
+					mp.lastOffset++
+					if config.Producer.Return.Successes {
+						msg.Offset = mp.lastOffset
+						mp.successes <- msg
+					}
+				} else {
+					if config.Producer.Return.Errors {
+						mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
+					}
+				}
+			}
+			mp.l.Unlock()
+		}
+
+		mp.l.Lock()
+		if len(mp.expectations) > 0 {
+			mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
+		}
+		mp.l.Unlock()
+
+		close(mp.closed)
+	}()
+
+	return mp
+}
+
+////////////////////////////////////////////////
+// Implement Producer interface
+////////////////////////////////////////////////
+
+// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) AsyncClose() {
+	close(mp.input)
+}
+
+// Close corresponds with the Close method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) Close() error {
+	mp.AsyncClose()
+	<-mp.closed
+	return nil
+}
+
+// Input corresponds with the Input method of sarama's Producer implementation.
+// You have to set expectations on the mock producer before writing messages to the Input
+// channel, so it knows how to handle them. If there are no remaining expectations and
+// a message is written to the Input channel, the mock producer will write an error to the test
+// state object.
+func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
+	return mp.input
+}
+
+// Successes corresponds with the Successes method of sarama's Producer implementation.
+func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
+	return mp.successes
+}
+
+// Errors corresponds with the Errors method of sarama's Producer implementation.
+func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
+	return mp.errors
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
+// will be provided on the input channel. The mock producer will call the given function to check
+// the message value. If an error is returned it will be made available on the Errors channel
+// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
+// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
+func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
+	mp.l.Lock()
+	defer mp.l.Unlock()
+	mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
+}
+
+// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
+// will be provided on the input channel. The mock producer will first call the given function to
+// check the message value. If an error is returned it will be made available on the Errors channel
+// otherwise the mock will handle the message as if it failed to produce successfully. This means
+// it will make a ProducerError available on the Errors channel.
+func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
+	mp.l.Lock()
+	defer mp.l.Unlock()
+	mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
+}
+
+// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it is produced successfully,
+// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
+// is set to true.
+func (mp *AsyncProducer) ExpectInputAndSucceed() {
+	mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
+}
+
+// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it failed to produce
+// successfully. This means it will make a ProducerError available on the Errors channel.
+func (mp *AsyncProducer) ExpectInputAndFail(err error) {
+	mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go
new file mode 100644
index 0000000000..003d4d3e28
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/consumer.go
@@ -0,0 +1,315 @@
+package mocks
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"github.com/Shopify/sarama"
+)
+
+// Consumer implements sarama's Consumer interface for testing purposes.
+// Before you can start consuming from this consumer, you have to register
+// topic/partitions using ExpectConsumePartition, and set expectations on them.
+type Consumer struct {
+	l                  sync.Mutex
+	t                  ErrorReporter
+	config             *sarama.Config
+	partitionConsumers map[string]map[int32]*PartitionConsumer
+	metadata           map[string][]int32
+}
+
+// NewConsumer returns a new mock Consumer instance. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument can be set to nil.
+func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
+	if config == nil {
+		config = sarama.NewConfig()
+	}
+
+	c := &Consumer{
+		t:                  t,
+		config:             config,
+		partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
+	}
+	return c
+}
+
+///////////////////////////////////////////////////
+// Consumer interface implementation
+///////////////////////////////////////////////////
+
+// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
+// Before you can start consuming a partition, you have to set expectations on it using
+// ExpectConsumePartition. You can only consume a partition once per consumer.
+func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
+		c.t.Errorf("No expectations set for %s/%d", topic, partition)
+		return nil, errOutOfExpectations
+	}
+
+	pc := c.partitionConsumers[topic][partition]
+	if pc.consumed {
+		return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
+	}
+
+	if pc.offset != AnyOffset && pc.offset != offset {
+		c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
+	}
+
+	pc.consumed = true
+	return pc, nil
+}
+
+// Topics returns a list of topics, as registered with SetMetadata
+func (c *Consumer) Topics() ([]string, error) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.metadata == nil {
+		c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
+		return nil, sarama.ErrOutOfBrokers
+	}
+
+	var result []string
+	for topic := range c.metadata {
+		result = append(result, topic)
+	}
+	return result, nil
+}
+
+// Partitions returns the list of partitions for the given topic, as registered with SetMetadata
+func (c *Consumer) Partitions(topic string) ([]int32, error) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.metadata == nil {
+		c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
Initialize the mock's topic metadata with SetMetadata.") + return nil, sarama.ErrOutOfBrokers + } + if c.metadata[topic] == nil { + return nil, sarama.ErrUnknownTopicOrPartition + } + + return c.metadata[topic], nil +} + +func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { + c.l.Lock() + defer c.l.Unlock() + + hwms := make(map[string]map[int32]int64, len(c.partitionConsumers)) + for topic, partitionConsumers := range c.partitionConsumers { + hwm := make(map[int32]int64, len(partitionConsumers)) + for partition, pc := range partitionConsumers { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +// Close implements the Close method from the sarama.Consumer interface. It will close +// all registered PartitionConsumer instances. +func (c *Consumer) Close() error { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + _ = partitionConsumer.Close() + } + } + + return nil +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// SetTopicMetadata sets the clusters topic/partition metadata, +// which will be returned by Topics() and Partitions(). +func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + c.metadata = metadata +} + +// ExpectConsumePartition will register a topic/partition, so you can set expectations on it. +// The registered PartitionConsumer will be returned, so you can set expectations +// on it using method chaining. Once a topic/partition is registered, you are +// expected to start consuming it using ConsumePartition. If that doesn't happen, +// an error will be written to the error reporter once the mock consumer is closed. It will +// also expect that the +func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil { + c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) + } + + if c.partitionConsumers[topic][partition] == nil { + c.partitionConsumers[topic][partition] = &PartitionConsumer{ + t: c.t, + topic: topic, + partition: partition, + offset: offset, + messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), + } + } + + return c.partitionConsumers[topic][partition] +} + +/////////////////////////////////////////////////// +// PartitionConsumer mock type +/////////////////////////////////////////////////// + +// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. +// It is returned by the mock Consumers ConsumePartitionMethod, but only if it is +// registered first using the Consumer's ExpectConsumePartition method. Before consuming the +// Errors and Messages channel, you should specify what values will be provided on these +// channels using YieldMessage and YieldError. 
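+//
+// As an illustrative sketch (editorial example, not part of the upstream
+// sarama source; the topic name and message value are placeholders), a test
+// driving this mock might look like:
+//
+//	consumer := mocks.NewConsumer(t, nil)
+//	pc := consumer.ExpectConsumePartition("my_topic", 0, sarama.OffsetOldest)
+//	pc.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
+//	pc.ExpectMessagesDrainedOnClose()
+//
+//	partConsumer, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
+//	// ... read from partConsumer.Messages(), then close partConsumer and consumer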
+type PartitionConsumer struct {
+	highWaterMarkOffset     int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	l                       sync.Mutex
+	t                       ErrorReporter
+	topic                   string
+	partition               int32
+	offset                  int64
+	messages                chan *sarama.ConsumerMessage
+	errors                  chan *sarama.ConsumerError
+	singleClose             sync.Once
+	consumed                bool
+	errorsShouldBeDrained   bool
+	messagesShouldBeDrained bool
+}
+
+///////////////////////////////////////////////////
+// PartitionConsumer interface implementation
+///////////////////////////////////////////////////
+
+// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) AsyncClose() {
+	pc.singleClose.Do(func() {
+		close(pc.messages)
+		close(pc.errors)
+	})
+}
+
+// Close implements the Close method from the sarama.PartitionConsumer interface. It will
+// verify whether the partition consumer was actually started.
+func (pc *PartitionConsumer) Close() error {
+	if !pc.consumed {
+		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
+		return errPartitionConsumerNotStarted
+	}
+
+	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
+		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
+	}
+
+	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
+		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
+	}
+
+	pc.AsyncClose()
+
+	var (
+		closeErr error
+		wg       sync.WaitGroup
+	)
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		var errs = make(sarama.ConsumerErrors, 0)
+		for err := range pc.errors {
+			errs = append(errs, err)
+		}
+
+		if len(errs) > 0 {
+			closeErr = errs
+		}
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for range pc.messages {
+			// drain
+		}
+	}()
+
+	wg.Wait()
+	return closeErr
+}
+
+// Errors implements the Errors method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
+	return pc.errors
+}
+
+// Messages implements the Messages method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
+	return pc.messages
+}
+
+// HighWaterMarkOffset returns the offset that will be assigned to the next message
+// yielded via YieldMessage, i.e. one past the highest offset yielded so far.
+func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
+	return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// YieldMessage will yield a message on the Messages channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this
+// message was consumed from the Messages channel, because there are legitimate
+// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
+// verify that the channel is empty on close.
+func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
+	pc.l.Lock()
+	defer pc.l.Unlock()
+
+	msg.Topic = pc.topic
+	msg.Partition = pc.partition
+	msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
+
+	pc.messages <- msg
+}
+
+// YieldError will yield an error on the Errors channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this error was
+// consumed from the Errors channel, because there are legitimate reasons for this
+// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
+// the channel is empty on close.
+func (pc *PartitionConsumer) YieldError(err error) {
+	pc.errors <- &sarama.ConsumerError{
+		Topic:     pc.topic,
+		Partition: pc.partition,
+		Err:       err,
+	}
+}
+
+// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
+// that the messages channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
+func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
+	pc.messagesShouldBeDrained = true
+}
+
+// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
+// that the errors channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
+func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
+	pc.errorsShouldBeDrained = true
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go
new file mode 100644
index 0000000000..4adb838d99
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/mocks.go
@@ -0,0 +1,48 @@
+/*
+Package mocks provides mocks that can be used for testing applications
+that use Sarama. The mock types provided by this package implement the
+interfaces Sarama exports, so you can use them for dependency injection
+in your tests.
+
+All mock instances require you to set expectations on them before you
+can use them. These expectations determine how the mock will behave. If an
+expectation is not met, it will make your test fail.
+
+NOTE: this package currently does not fall under the API stability
+guarantee of Sarama as it is still considered experimental.
+*/
+package mocks
+
+import (
+	"errors"
+
+	"github.com/Shopify/sarama"
+)
+
+// ErrorReporter is a simple interface that includes the testing.T methods we use to report
+// expectation violations when using the mock objects.
+type ErrorReporter interface {
+	Errorf(string, ...interface{})
+}
+
+// ValueChecker is a function type to be set in each expectation of the producer mocks
+// to check the value passed.
+type ValueChecker func(val []byte) error
+
+var (
+	errProduceSuccess              error = nil
+	errOutOfExpectations           = errors.New("No more expectations set on mock")
+	errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
+)
+
+const AnyOffset int64 = -1000
+
+type producerExpectation struct {
+	Result        error
+	CheckFunction ValueChecker
+}
+
+type consumerExpectation struct {
+	Err error
+	Msg *sarama.ConsumerMessage
+}
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
new file mode 100644
index 0000000000..3f4986e2f8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
@@ -0,0 +1,157 @@
+package mocks
+
+import (
+	"sync"
+
+	"github.com/Shopify/sarama"
+)
+
+// SyncProducer implements sarama's SyncProducer interface for testing purposes.
+// Before you can use it, you have to set expectations on the mock SyncProducer
+// to tell it how to handle calls to SendMessage, so you can easily test success
+// and failure scenarios.
+type SyncProducer struct {
+	l            sync.Mutex
+	t            ErrorReporter
+	expectations []*producerExpectation
+	lastOffset   int64
+}
+
+// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is currently unused, but is
+// maintained to be compatible with the async Producer.
+func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
+	return &SyncProducer{
+		t:            t,
+		expectations: make([]*producerExpectation, 0),
+	}
+}
+
+////////////////////////////////////////////////
+// Implement SyncProducer interface
+////////////////////////////////////////////////
+
+// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
+// You have to set expectations on the mock producer before calling SendMessage, so it knows
+// how to handle them. You can set a function in each expectation so that the message value
+// is checked by this function and an error is returned if the check fails.
+// If there are no remaining expectations when SendMessage is called,
+// the mock producer will write an error to the test state object.
+func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+
+	if len(sp.expectations) > 0 {
+		expectation := sp.expectations[0]
+		sp.expectations = sp.expectations[1:]
+		if expectation.CheckFunction != nil {
+			val, err := msg.Value.Encode()
+			if err != nil {
+				sp.t.Errorf("Input message encoding failed: %s", err.Error())
+				return -1, -1, err
+			}
+
+			errCheck := expectation.CheckFunction(val)
+			if errCheck != nil {
+				sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
+				return -1, -1, errCheck
+			}
+		}
+		if expectation.Result == errProduceSuccess {
+			sp.lastOffset++
+			msg.Offset = sp.lastOffset
+			return 0, msg.Offset, nil
+		}
+		return -1, -1, expectation.Result
+	}
+	sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+	return -1, -1, errOutOfExpectations
+}
+
+// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
+// You have to set expectations on the mock producer before calling SendMessages, so it knows
+// how to handle them. If there are no remaining expectations when SendMessages is called,
+// the mock producer will write an error to the test state object.
+func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+
+	if len(sp.expectations) >= len(msgs) {
+		expectations := sp.expectations[0:len(msgs)]
+		sp.expectations = sp.expectations[len(msgs):]
+
+		for i, expectation := range expectations {
+			if expectation.CheckFunction != nil {
+				val, err := msgs[i].Value.Encode()
+				if err != nil {
+					sp.t.Errorf("Input message encoding failed: %s", err.Error())
+					return err
+				}
+				errCheck := expectation.CheckFunction(val)
+				if errCheck != nil {
+					sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
+					return errCheck
+				}
+			}
+			if expectation.Result != errProduceSuccess {
+				return expectation.Result
+			}
+		}
+		return nil
+	}
+	sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
+	return errOutOfExpectations
+}
+
+// Close corresponds with the Close method of sarama's SyncProducer implementation.
+// By closing a mock SyncProducer, you also tell it that no more SendMessage calls will follow,
+// so it will write an error to the test state if there are any remaining expectations.
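+//
+// A minimal usage sketch (editorial example, not part of the upstream source;
+// the topic and value are placeholders):
+//
+//	sp := mocks.NewSyncProducer(t, nil)
+//	sp.ExpectSendMessageAndSucceed()
+//	partition, offset, err := sp.SendMessage(&sarama.ProducerMessage{
+//		Topic: "my_topic",
+//		Value: sarama.StringEncoder("hello"),
+//	})
+//	// ... assert on partition, offset and err, then:
+//	_ = sp.Close()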
+func (sp *SyncProducer) Close() error {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+
+	if len(sp.expectations) > 0 {
+		sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
+	}
+
+	return nil
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
+// will be called. The mock producer will first call the given function to check the message value.
+// It will cascade the error of the function, if any, or handle the message as if it produced
+// successfully, i.e. by returning a valid partition and offset, and a nil error.
+func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+	sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
+}
+
+// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will first call the given function to check the message value.
+// It will cascade the error of the function, if any, or handle the message as if it failed
+// to produce successfully, i.e. by returning the provided error.
+func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
+	sp.l.Lock()
+	defer sp.l.Unlock()
+	sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
+}
+
+// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it produced successfully, i.e. by
+// returning a valid partition and offset, and a nil error.
+func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
+	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
+}
+
+// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it failed to produce
+// successfully, i.e. by returning the provided error.
+func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
+	sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 0000000000..37e99fbf5b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,204 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires Kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
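+//
+// For instance (an illustrative sketch, not taken from the upstream source;
+// the group, topic and offset are placeholders), a consumer that manages its
+// own partition assignment could commit an offset with:
+//
+//	req := &sarama.OffsetCommitRequest{
+//		Version:                 1,
+//		ConsumerGroup:           "my-group",
+//		ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
+//	}
+//	req.AddBlock("my_topic", 0, 42, sarama.ReceiveTime, "")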
+const GroupGenerationUndefined = -1 + +type offsetCommitRequestBlock struct { + offset int64 + timestamp int64 + metadata string +} + +func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.offset) + if version == 1 { + pe.putInt64(b.timestamp) + } else if b.timestamp != 0 { + Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") + } + + return pe.putString(b.metadata) +} + +func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.offset, err = pd.getInt64(); err != nil { + return err + } + if version == 1 { + if b.timestamp, err = pd.getInt64(); err != nil { + return err + } + } + b.metadata, err = pd.getString() + return err +} + +type OffsetCommitRequest struct { + ConsumerGroup string + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + RetentionTime int64 // v2 or later + + // Version can be: + // - 0 (kafka 0.8.1 and later) + // - 1 (kafka 0.8.2 and later) + // - 2 (kafka 0.9.0 and later) + Version int16 + blocks map[string]map[int32]*offsetCommitRequestBlock +} + +func (r *OffsetCommitRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 2 { + return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} + } + + if err := pe.putString(r.ConsumerGroup); err != nil { + return err + } + + if r.Version >= 1 { + pe.putInt32(r.ConsumerGroupGeneration) + if err := pe.putString(r.ConsumerID); err != nil { + return err + } + } else { + if r.ConsumerGroupGeneration != 0 { + Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") + } + if r.ConsumerID != "" { + Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") + } + } + + if r.Version >= 2 { + pe.putInt64(r.RetentionTime) + } else if r.RetentionTime != 0 { + Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") + } + + if err := pe.putArrayLength(len(r.blocks)); err != nil { + return err + } + for topic, partitions := range r.blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + + if r.Version >= 1 { + if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { + return err + } + if r.ConsumerID, err = pd.getString(); err != nil { + return err + } + } + + if r.Version >= 2 { + if r.RetentionTime, err = pd.getInt64(); err != nil { + return err + } + } + + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetCommitRequestBlock{} + if err := block.decode(pd, 
r.Version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetCommitRequest) key() int16 { + return 8 +} + +func (r *OffsetCommitRequest) version() int16 { + return r.Version +} + +func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + default: + return MinVersion + } +} + +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + } + + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} +} + +func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { + partitions := r.blocks[topic] + if partitions == nil { + return 0, "", errors.New("No such offset") + } + block := partitions[partitionID] + if block == nil { + return 0, "", errors.New("No such offset") + } + return block.offset, block.metadata, nil +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go new file mode 100644 index 0000000000..a4b18acdff --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -0,0 +1,85 @@ +package sarama + +type OffsetCommitResponse struct { + Errors map[string]map[int32]KError +} + +func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = kerror +} + +func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, kerror := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(kerror)) + } + } + return nil +} + +func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *OffsetCommitResponse) key() int16 { + return 8 +} + +func (r *OffsetCommitResponse) version() int16 { + return 0 +} + +func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { + return MinVersion +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go new file mode 100644 index 0000000000..5a05014b48 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -0,0 +1,81 @@ +package sarama + +type 
OffsetFetchRequest struct { + ConsumerGroup string + Version int16 + partitions map[string][]int32 +} + +func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 1 { + return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} + } + + if err = pe.putString(r.ConsumerGroup); err != nil { + return err + } + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + if partitionCount == 0 { + return nil + } + r.partitions = make(map[string][]int32) + for i := 0; i < partitionCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + r.partitions[topic] = partitions + } + return nil +} + +func (r *OffsetFetchRequest) key() int16 { + return 9 +} + +func (r *OffsetFetchRequest) version() int16 { + return r.Version +} + +func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + default: + return MinVersion + } +} + +func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go new file mode 100644 index 0000000000..11e4b1f3fd --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -0,0 +1,143 @@ +package sarama + +type OffsetFetchResponseBlock struct { + Offset int64 + Metadata string + Err KError +} + +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + b.Metadata, err = pd.getString() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + return nil +} + +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt64(b.Offset) + + err = pe.putString(b.Metadata) + if err != nil { + return err + } + + pe.putInt16(int16(b.Err)) + + return nil +} + +type OffsetFetchResponse struct { + Blocks map[string]map[int32]*OffsetFetchResponseBlock +} + +func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + for topic, partitions := range r.Blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err 
:= pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetFetchResponse) key() int16 { + return 9 +} + +func (r *OffsetFetchResponse) version() int16 { + return 0 +} + +func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { + return MinVersion +} + +func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) + } + partitions := r.Blocks[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + r.Blocks[topic] = partitions + } + partitions[partition] = block +} diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go new file mode 100644 index 0000000000..6c01f959e9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -0,0 +1,560 @@ +package sarama + +import ( + "sync" + "time" +) + +// Offset Manager + +// OffsetManager uses Kafka to store and fetch consumed partition offsets. +type OffsetManager interface { + // ManagePartition creates a PartitionOffsetManager on the given topic/partition. + // It will return an error if this OffsetManager is already managing the given + // topic/partition. + ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) + + // Close stops the OffsetManager from managing offsets. It is required to call + // this function before an OffsetManager object passes out of scope, as it + // will otherwise leak memory. You must call this after all the + // PartitionOffsetManagers are closed. + Close() error +} + +type offsetManager struct { + client Client + conf *Config + group string + + lock sync.Mutex + poms map[string]map[int32]*partitionOffsetManager + boms map[*Broker]*brokerOffsetManager +} + +// NewOffsetManagerFromClient creates a new OffsetManager from the given client. +// It is still necessary to call Close() on the underlying client when finished with the partition manager. 
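+//
+// A rough usage sketch (editorial example, not part of the upstream source;
+// client construction and error handling are elided):
+//
+//	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
+//	// handle err ...
+//	pom, err := om.ManagePartition("my_topic", 0)
+//	// handle err ...
+//	offset, metadata := pom.NextOffset()
+//	// ... consume from offset, calling pom.MarkOffset(msg.Offset+1, metadata)
+//	// after each message, then pom.Close() and om.Close()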
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + om := &offsetManager{ + client: client, + conf: client.Config(), + group: group, + poms: make(map[string]map[int32]*partitionOffsetManager), + boms: make(map[*Broker]*brokerOffsetManager), + } + + return om, nil +} + +func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { + pom, err := om.newPartitionOffsetManager(topic, partition) + if err != nil { + return nil, err + } + + om.lock.Lock() + defer om.lock.Unlock() + + topicManagers := om.poms[topic] + if topicManagers == nil { + topicManagers = make(map[int32]*partitionOffsetManager) + om.poms[topic] = topicManagers + } + + if topicManagers[partition] != nil { + return nil, ConfigurationError("That topic/partition is already being managed") + } + + topicManagers[partition] = pom + return pom, nil +} + +func (om *offsetManager) Close() error { + return nil +} + +func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { + om.lock.Lock() + defer om.lock.Unlock() + + bom := om.boms[broker] + if bom == nil { + bom = om.newBrokerOffsetManager(broker) + om.boms[broker] = bom + } + + bom.refs++ + + return bom +} + +func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + bom.refs-- + + if bom.refs == 0 { + close(bom.updateSubscriptions) + if om.boms[bom.broker] == bom { + delete(om.boms, bom.broker) + } + } +} + +func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + delete(om.boms, bom.broker) +} + +func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + delete(om.poms[pom.topic], pom.partition) + if len(om.poms[pom.topic]) == 0 { + delete(om.poms, pom.topic) + } +} + +// Partition Offset Manager + +// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() +// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes +// out of scope. +type PartitionOffsetManager interface { + // NextOffset returns the next offset that should be consumed for the managed + // partition, accompanied by metadata which can be used to reconstruct the state + // of the partition consumer when it resumes. NextOffset() will return + // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset + // was committed for this partition yet. + NextOffset() (int64, string) + + // MarkOffset marks the provided offset, alongside a metadata string + // that represents the state of the partition consumer at that point in time. The + // metadata string can be used by another consumer to restore that state, so it + // can resume consumption. + // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. + // + // Note: calling MarkOffset does not necessarily commit the offset to the backend + // store immediately for efficiency reasons, and it may never be committed if + // your application crashes. 
This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows you to
+	// reset an offset to an earlier or smaller value, where MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(offset int64, metadata string)
+
+	// Errors returns a read channel of errors that occur during offset management, if
+	// enabled. By default, errors are logged and not returned over this channel. If
+	// you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+	// return immediately, after which you should wait until the 'errors' channel has
+	// been drained and closed. It is required to call this function (or Close) before
+	// a consumer object passes out of scope, as it will otherwise leak memory. You
+	// must call this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionOffsetManager from managing offsets. It is required to
+	// call this function (or AsyncClose) before a PartitionOffsetManager object
+	// passes out of scope, as it will otherwise leak memory. You must call this
+	// before calling Close on the underlying client.
+	Close() error
+}
+
+type partitionOffsetManager struct {
+	parent    *offsetManager
+	topic     string
+	partition int32
+
+	lock     sync.Mutex
+	offset   int64
+	metadata string
+	dirty    bool
+	clean    sync.Cond
+	broker   *brokerOffsetManager
+
+	errors    chan *ConsumerError
+	rebalance chan none
+	dying     chan none
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+	pom := &partitionOffsetManager{
+		parent:    om,
+		topic:     topic,
+		partition: partition,
+		errors:    make(chan *ConsumerError, om.conf.ChannelBufferSize),
+		rebalance: make(chan none, 1),
+		dying:     make(chan none),
+	}
+	pom.clean.L = &pom.lock
+
+	if err := pom.selectBroker(); err != nil {
+		return nil, err
+	}
+
+	if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
+		return nil, err
+	}
+
+	pom.broker.updateSubscriptions <- pom
+
+	go withRecover(pom.mainLoop)
+
+	return pom, nil
+}
+
+func (pom *partitionOffsetManager) mainLoop() {
+	for {
+		select {
+		case <-pom.rebalance:
+			if err := pom.selectBroker(); err != nil {
+				pom.handleError(err)
+				pom.rebalance <- none{}
+			} else {
+				pom.broker.updateSubscriptions <- pom
+			}
+		case <-pom.dying:
+			if pom.broker != nil {
+				select {
+				case <-pom.rebalance:
+				case pom.broker.updateSubscriptions <- pom:
+				}
+				pom.parent.unrefBrokerOffsetManager(pom.broker)
+			}
+			pom.parent.abandonPartitionOffsetManager(pom)
+			close(pom.errors)
+			return
+		}
+	}
+}
+
+func (pom *partitionOffsetManager) selectBroker() error {
+	if pom.broker != nil {
+		pom.parent.unrefBrokerOffsetManager(pom.broker)
+		pom.broker = nil
+	}
+
+	var broker *Broker
+	var err error
+
+	if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
+		return err
+	}
+
+	if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
+		return err
+	}
+
+	pom.broker =
pom.parent.refBrokerOffsetManager(broker) + return nil +} + +func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { + request := new(OffsetFetchRequest) + request.Version = 1 + request.ConsumerGroup = pom.parent.group + request.AddPartition(pom.topic, pom.partition) + + response, err := pom.broker.broker.FetchOffset(request) + if err != nil { + return err + } + + block := response.GetBlock(pom.topic, pom.partition) + if block == nil { + return ErrIncompleteResponse + } + + switch block.Err { + case ErrNoError: + pom.offset = block.Offset + pom.metadata = block.Metadata + return nil + case ErrNotCoordinatorForConsumer: + if retries <= 0 { + return block.Err + } + if err := pom.selectBroker(); err != nil { + return err + } + return pom.fetchInitialOffset(retries - 1) + case ErrOffsetsLoadInProgress: + if retries <= 0 { + return block.Err + } + time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) + return pom.fetchInitialOffset(retries - 1) + default: + return block.Err + } +} + +func (pom *partitionOffsetManager) handleError(err error) { + cErr := &ConsumerError{ + Topic: pom.topic, + Partition: pom.partition, + Err: err, + } + + if pom.parent.conf.Consumer.Return.Errors { + pom.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { + return pom.errors +} + +func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset > pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset <= pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset == offset && pom.metadata == metadata { + pom.dirty = false + pom.clean.Signal() + } +} + +func (pom *partitionOffsetManager) NextOffset() (int64, string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset >= 0 { + return pom.offset, pom.metadata + } + + return pom.parent.conf.Consumer.Offsets.Initial, "" +} + +func (pom *partitionOffsetManager) AsyncClose() { + go func() { + pom.lock.Lock() + defer pom.lock.Unlock() + + for pom.dirty { + pom.clean.Wait() + } + + close(pom.dying) + }() +} + +func (pom *partitionOffsetManager) Close() error { + pom.AsyncClose() + + var errors ConsumerErrors + for err := range pom.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// Broker Offset Manager + +type brokerOffsetManager struct { + parent *offsetManager + broker *Broker + timer *time.Ticker + updateSubscriptions chan *partitionOffsetManager + subscriptions map[*partitionOffsetManager]none + refs int +} + +func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { + bom := &brokerOffsetManager{ + parent: om, + broker: broker, + timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), + updateSubscriptions: make(chan *partitionOffsetManager), + subscriptions: make(map[*partitionOffsetManager]none), + } + + go withRecover(bom.mainLoop) + + return bom +} + +func (bom *brokerOffsetManager) mainLoop() { + for { + select { + case <-bom.timer.C: + if len(bom.subscriptions) > 0 { + bom.flushToBroker() + } + case s, ok := <-bom.updateSubscriptions: + if !ok { + 
bom.timer.Stop() + return + } + if _, ok := bom.subscriptions[s]; ok { + delete(bom.subscriptions, s) + } else { + bom.subscriptions[s] = none{} + } + } + } +} + +func (bom *brokerOffsetManager) flushToBroker() { + request := bom.constructRequest() + if request == nil { + return + } + + response, err := bom.broker.CommitOffset(request) + + if err != nil { + bom.abort(err) + return + } + + for s := range bom.subscriptions { + if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { + continue + } + + var err KError + var ok bool + + if response.Errors[s.topic] == nil { + s.handleError(ErrIncompleteResponse) + delete(bom.subscriptions, s) + s.rebalance <- none{} + continue + } + if err, ok = response.Errors[s.topic][s.partition]; !ok { + s.handleError(ErrIncompleteResponse) + delete(bom.subscriptions, s) + s.rebalance <- none{} + continue + } + + switch err { + case ErrNoError: + block := request.blocks[s.topic][s.partition] + s.updateCommitted(block.offset, block.metadata) + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: + // not a critical error, we just need to redispatch + delete(bom.subscriptions, s) + s.rebalance <- none{} + case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: + // nothing we can do about this, just tell the user and carry on + s.handleError(err) + case ErrOffsetsLoadInProgress: + // nothing wrong but we didn't commit, we'll get it next time round + break + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if topic-auto-create is + // enabled, redispatching should trigger a metadata request and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough + default: + // dunno, tell the user and try redispatching + s.handleError(err) + delete(bom.subscriptions, s) + s.rebalance <- none{} + } + } +} + +func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { + var r *OffsetCommitRequest + var perPartitionTimestamp int64 + if bom.parent.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime + r = &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: bom.parent.group, + ConsumerGroupGeneration: GroupGenerationUndefined, + } + } else { + r = &OffsetCommitRequest{ + Version: 2, + RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), + ConsumerGroup: bom.parent.group, + ConsumerGroupGeneration: GroupGenerationUndefined, + } + + } + + for s := range bom.subscriptions { + s.lock.Lock() + if s.dirty { + r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) + } + s.lock.Unlock() + } + + if len(r.blocks) > 0 { + return r + } + + return nil +} + +func (bom *brokerOffsetManager) abort(err error) { + _ = bom.broker.Close() // we don't care about the error this might return, we already have one + bom.parent.abandonBroker(bom) + + for pom := range bom.subscriptions { + pom.handleError(err) + pom.rebalance <- none{} + } + + for s := range bom.updateSubscriptions { + if _, ok := bom.subscriptions[s]; !ok { + s.handleError(err) + s.rebalance <- none{} + } + } + + bom.subscriptions = make(map[*partitionOffsetManager]none) +} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go new file mode 100644 index 0000000000..4c5df75df0 --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/offset_request.go @@ -0,0 +1,132 @@ +package sarama + +type offsetRequestBlock struct { + time int64 + maxOffsets int32 // Only used in version 0 +} + +func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(int64(b.time)) + if version == 0 { + pe.putInt32(b.maxOffsets) + } + + return nil +} + +func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.time, err = pd.getInt64(); err != nil { + return err + } + if version == 0 { + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } + } + return nil +} + +type OffsetRequest struct { + Version int16 + blocks map[string]map[int32]*offsetRequestBlock +} + +func (r *OffsetRequest) encode(pe packetEncoder) error { + pe.putInt32(-1) // replica ID is always -1 for clients + err := pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, partitions := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + // Ignore replica ID + if _, err := pd.getInt32(); err != nil { + return err + } + blockCount, err := pd.getArrayLength() + if err != nil { + return err + } + if blockCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + for i := 0; i < blockCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetRequestBlock{} + if err := block.decode(pd, version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetRequest) key() int16 { + return 2 +} + +func (r *OffsetRequest) version() int16 { + return r.Version +} + +func (r *OffsetRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return MinVersion + } +} + +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + } + + tmp := new(offsetRequestBlock) + tmp.time = time + if r.Version == 0 { + tmp.maxOffsets = maxOffsets + } + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go new file mode 100644 index 0000000000..8b2193f9a0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -0,0 +1,174 @@ +package sarama + +type OffsetResponseBlock struct { + Err KError + Offsets []int64 // Version 0 + Offset int64 // Version 1 + Timestamp int64 // Version 1 +} + +func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + if version == 0 { + b.Offsets, err = pd.getInt64Array() + + return err + } + + b.Timestamp, err = pd.getInt64() + if err != 
nil { + return err + } + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} + + return nil +} + +func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + if version == 0 { + return pe.putInt64Array(b.Offsets) + } + + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + + return nil +} + +type OffsetResponse struct { + Version int16 + Blocks map[string]map[int32]*OffsetResponseBlock +} + +func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +/* +// [0 0 0 1 ntopics +0 8 109 121 95 116 111 112 105 99 topic +0 0 0 1 npartitions +0 0 0 0 id +0 0 + +0 0 0 1 0 0 0 0 +0 1 1 1 0 0 0 1 +0 8 109 121 95 116 111 112 +105 99 0 0 0 1 0 0 +0 0 0 0 0 0 0 1 +0 0 0 0 0 1 1 1] + +*/ +func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if err = pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.version()); err != nil { + return err + } + } + } + + return nil +} + +func (r *OffsetResponse) key() int16 { + return 2 +} + +func (r *OffsetResponse) version() int16 { + return r.Version +} + +func (r *OffsetResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return MinVersion + } +} + +// testing API + +func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*OffsetResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} +} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go new file mode 100644 index 0000000000..74805ccbf5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -0,0 +1,60 @@ +package sarama + +// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. +// Types implementing Decoder only need to worry about calling methods like GetString, +// not about how a string is represented in Kafka. 
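+//
+// As an illustrative sketch (editorial example; the block type and fields are
+// placeholders), a typical decode method drives these helpers like so:
+//
+//	func (b *exampleBlock) decode(pd packetDecoder) (err error) {
+//		if b.id, err = pd.getInt32(); err != nil {
+//			return err
+//		}
+//		if b.name, err = pd.getString(); err != nil {
+//			return err
+//		}
+//		return nil
+//	}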
+type packetDecoder interface {
+	// Primitives
+	getInt8() (int8, error)
+	getInt16() (int16, error)
+	getInt32() (int32, error)
+	getInt64() (int64, error)
+	getVarint() (int64, error)
+	getArrayLength() (int, error)
+	getBool() (bool, error)
+
+	// Collections
+	getBytes() ([]byte, error)
+	getVarintBytes() ([]byte, error)
+	getRawBytes(length int) ([]byte, error)
+	getString() (string, error)
+	getNullableString() (*string, error)
+	getInt32Array() ([]int32, error)
+	getInt64Array() ([]int64, error)
+	getStringArray() ([]string, error)
+
+	// Subsets
+	remaining() int
+	getSubset(length int) (packetDecoder, error)
+	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+
+	// Stacks, see PushDecoder
+	push(in pushDecoder) error
+	pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and check the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+	check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance varint encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 0000000000..67b8daed82
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,65 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
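+//
+// A sketch of the mirror-image encode path (editorial example; the block type
+// and fields are placeholders):
+//
+//	func (b *exampleBlock) encode(pe packetEncoder) error {
+//		pe.putInt32(b.id)
+//		return pe.putString(b.name)
+//	}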
+type packetEncoder interface {
+	// Primitives
+	putInt8(in int8)
+	putInt16(in int16)
+	putInt32(in int32)
+	putInt64(in int64)
+	putVarint(in int64)
+	putArrayLength(in int) error
+	putBool(in bool)
+
+	// Collections
+	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
+	putRawBytes(in []byte) error
+	putString(in string) error
+	putNullableString(in *string) error
+	putStringArray(in []string) error
+	putInt32Array(in []int32) error
+	putInt64Array(in []int64) error
+
+	// Provide the current offset to record the batch size metric
+	offset() int
+
+	// Stacks, see PushEncoder
+	push(in pushEncoder)
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and write the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+	// of data to the saved offset, based on the data between the saved offset and curOffset.
+	run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance varint encoded length
+// fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and current length.
+	adjustLength(currOffset int) int
+}
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 0000000000..972932728a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,135 @@
+package sarama
+
+import (
+	"hash"
+	"hash/fnv"
+	"math/rand"
+	"time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+	// Partition takes a message and partition count and chooses a partition
+	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+	// RequiresConsistency indicates to the user of the partitioner whether the
+	// mapping of key->partition is consistent or not. Specifically, if a
+	// partitioner requires consistency then it must be allowed to choose from all
+	// partitions (even ones known to be unavailable), and its choice must be
+	// respected by the caller. The obvious example is the HashPartitioner.
+	RequiresConsistency() bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
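+//
+// For example (an illustrative sketch, not part of the upstream source), a
+// constructor is typically wired into a producer configuration:
+//
+//	config := sarama.NewConfig()
+//	config.Producer.Partitioner = sarama.NewHashPartitioner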
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+	return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+type randomPartitioner struct {
+	generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+	p := new(randomPartitioner)
+	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type roundRobinPartitioner struct {
+	partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+	return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if p.partition >= numPartitions {
+		p.partition = 0
+	}
+	ret := p.partition
+	p.partition++
+	return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type hashPartitioner struct {
+	random Partitioner
+	hasher hash.Hash32
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
+// each partition dispatcher gets its own hasher, avoiding the concurrency issues of sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = hasher()
+		return p
+	}
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulus the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + return p +} + +func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if message.Key == nil { + return p.random.Partition(message, numPartitions) + } + bytes, err := message.Key.Encode() + if err != nil { + return -1, err + } + p.hasher.Reset() + _, err = p.hasher.Write(bytes) + if err != nil { + return -1, err + } + partition := int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition + } + return partition, nil +} + +func (p *hashPartitioner) RequiresConsistency() bool { + return true +} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go new file mode 100644 index 0000000000..b633cd1511 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -0,0 +1,153 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "math" + + "github.com/rcrowley/go-metrics" +) + +type prepEncoder struct { + stack []pushEncoder + length int +} + +// primitives + +func (pe *prepEncoder) putInt8(in int8) { + pe.length++ +} + +func (pe *prepEncoder) putInt16(in int16) { + pe.length += 2 +} + +func (pe *prepEncoder) putInt32(in int32) { + pe.length += 4 +} + +func (pe *prepEncoder) putInt64(in int64) { + pe.length += 8 +} + +func (pe *prepEncoder) putVarint(in int64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutVarint(buf[:], in) +} + +func (pe *prepEncoder) putArrayLength(in int) error { + if in > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} + } + pe.length += 4 + return nil +} + +func (pe *prepEncoder) putBool(in bool) { + pe.length++ +} + +// arrays + +func (pe *prepEncoder) putBytes(in []byte) error { + pe.length += 4 + if in == nil { + return nil + } + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putVarintBytes(in []byte) error { + if in == nil { + pe.putVarint(-1) + return nil + } + pe.putVarint(int64(len(in))) + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putRawBytes(in []byte) error { + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putNullableString(in *string) error { + if in == nil { + pe.length += 2 + return nil + } + return pe.putString(*in) +} + +func (pe *prepEncoder) putString(in string) error { + pe.length += 2 + if len(in) > math.MaxInt16 { + return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putStringArray(in []string) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, str := range in { + if err := pe.putString(str); err != nil { + return err + } + } + + return nil +} + +func (pe *prepEncoder) putInt32Array(in []int32) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt64Array(in []int64) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 8 * len(in) + return nil +} + +func (pe *prepEncoder) offset() int { + return pe.length +} + +// stackable + +func (pe *prepEncoder) push(in pushEncoder) { + in.saveOffset(pe.length) + pe.length += in.reserveLength() + pe.stack = append(pe.stack, in) +} + +func (pe *prepEncoder) 
pop() error {
+	in := pe.stack[len(pe.stack)-1]
+	pe.stack = pe.stack[:len(pe.stack)-1]
+	if dpe, ok := in.(dynamicPushEncoder); ok {
+		pe.length += dpe.adjustLength(pe.length)
+	}
+
+	return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 0000000000..0c755d02b6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,252 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later the broker will return an error instead (this has
+// been replaced by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response, the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all in-sync replicas to commit before responding.
+	// The minimum number of in-sync replicas is configured on the broker via
+	// the `min.insync.replicas` configuration key.
+	WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+	TransactionalID *string
+	RequiredAcks    RequiredAcks
+	Timeout         int32
+	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+	records         map[string]map[int32]Records
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	var topicRecordCount int64
+	for _, messageBlock := range msgSet.Messages {
+		// Is this a fake "message" wrapping real messages?
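+		// (It is when Msg.Set is non-nil: pre-0.11 compression nests a whole
+		// message set inside the Value of a single wrapper message, and decoding
+		// surfaces the inner messages via Set, so those are what get counted.)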
+		if messageBlock.Msg.Set != nil {
+			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+		} else {
+			// A single uncompressed message
+			topicRecordCount++
+		}
+		// Better safe than sorry when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply by 100 for better precision
+			intCompressionRatio := int64(100 * compressionRatio)
+			compressionRatioMetric.Update(intCompressionRatio)
+			topicCompressionRatioMetric.Update(intCompressionRatio)
+		}
+	}
+	return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	if recordBatch.compressedRecords != nil {
+		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+		compressionRatioMetric.Update(compressionRatio)
+		topicCompressionRatioMetric.Update(compressionRatio)
+	}
+
+	return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.TransactionalID); err != nil {
+			return err
+		}
+	}
+	pe.putInt16(int16(r.RequiredAcks))
+	pe.putInt32(r.Timeout)
+	metricRegistry := pe.metricRegistry()
+	var batchSizeMetric metrics.Histogram
+	var compressionRatioMetric metrics.Histogram
+	if metricRegistry != nil {
+		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+	}
+	totalRecordCount := int64(0)
+
+	err := pe.putArrayLength(len(r.records))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.records {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		topicRecordCount := int64(0)
+		var topicCompressionRatioMetric metrics.Histogram
+		if metricRegistry != nil {
+			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+		}
+		for id, records := range partitions {
+			startOffset := pe.offset()
+			pe.putInt32(id)
+			pe.push(&lengthField{})
+			err = records.encode(pe)
+			if err != nil {
+				return err
+			}
+			err = pe.pop()
+			if err != nil {
+				return err
+			}
+			if metricRegistry != nil {
+				if r.Version >= 3 {
+					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+				} else {
+					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
+				}
+				batchSize := int64(pe.offset() - startOffset)
+				batchSizeMetric.Update(batchSize)
+				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+			}
+		}
+		if topicRecordCount > 0 {
+			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+			totalRecordCount += topicRecordCount
+		}
+	}
+	if totalRecordCount > 0 {
+		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	if version >= 3 {
+		id, err := 
pd.getNullableString() + if err != nil { + return err + } + r.TransactionalID = id + } + requiredAcks, err := pd.getInt16() + if err != nil { + return err + } + r.RequiredAcks = RequiredAcks(requiredAcks) + if r.Timeout, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + + r.records = make(map[string]map[int32]Records) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.records[topic] = make(map[int32]Records) + + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + size, err := pd.getInt32() + if err != nil { + return err + } + recordsDecoder, err := pd.getSubset(int(size)) + if err != nil { + return err + } + var records Records + if err := records.decode(recordsDecoder); err != nil { + return err + } + r.records[topic][partition] = records + } + } + + return nil +} + +func (r *ProduceRequest) key() int16 { + return 0 +} + +func (r *ProduceRequest) version() int16 { + return r.Version +} + +func (r *ProduceRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *ProduceRequest) ensureRecords(topic string, partition int32) { + if r.records == nil { + r.records = make(map[string]map[int32]Records) + } + + if r.records[topic] == nil { + r.records[topic] = make(map[int32]Records) + } +} + +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + r.ensureRecords(topic, partition) + set := r.records[topic][partition].MsgSet + + if set == nil { + set = new(MessageSet) + r.records[topic][partition] = newLegacyRecords(set) + } + + set.addMessage(msg) +} + +func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newLegacyRecords(set) +} + +func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newDefaultRecords(batch) +} diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go new file mode 100644 index 0000000000..667e34c661 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -0,0 +1,183 @@ +package sarama + +import ( + "fmt" + "time" +) + +type ProduceResponseBlock struct { + Err KError + Offset int64 + // only provided if Version >= 2 and the broker is configured with `LogAppendTime` + Timestamp time.Time +} + +func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 2 { + if millis, err := pd.getInt64(); err != nil { + return err + } else if millis != -1 { + b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + } + + return nil +} + +func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + pe.putInt64(b.Offset) + + if version >= 2 { + timestamp := int64(-1) + if !b.Timestamp.Before(time.Unix(0, 0)) { + timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) + } 
else if !b.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} + } + pe.putInt64(timestamp) + } + + return nil +} + +type ProduceResponse struct { + Blocks map[string]map[int32]*ProduceResponseBlock + Version int16 + ThrottleTime time.Duration // only provided if Version >= 1 +} + +func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(ProduceResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + if r.Version >= 1 { + millis, err := pd.getInt32() + if err != nil { + return err + } + + r.ThrottleTime = time.Duration(millis) * time.Millisecond + } + + return nil +} + +func (r *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, prb := range partitions { + pe.putInt32(id) + err = prb.encode(pe, r.Version) + if err != nil { + return err + } + } + } + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + return nil +} + +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +// Testing API + +func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*ProduceResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &ProduceResponseBlock{Err: err} +} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go new file mode 100644 index 0000000000..13be2b3c92 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -0,0 +1,252 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +type partitionSet struct { + msgs []*ProducerMessage + recordsToSend Records + bufferBytes int +} + +type produceSet struct { + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + + bufferBytes int + bufferCount int +} + +func newProduceSet(parent *asyncProducer) *produceSet { + return &produceSet{ + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + } +} + +func (ps *produceSet) add(msg *ProducerMessage) error { + var err error + var key, val []byte + + if msg.Key 
!= nil { + if key, err = msg.Key.Encode(); err != nil { + return err + } + } + + if msg.Value != nil { + if val, err = msg.Value.Encode(); err != nil { + return err + } + } + + timestamp := msg.Timestamp + if msg.Timestamp.IsZero() { + timestamp = time.Now() + } + + partitions := ps.msgs[msg.Topic] + if partitions == nil { + partitions = make(map[int32]*partitionSet) + ps.msgs[msg.Topic] = partitions + } + + var size int + + set := partitions[msg.Partition] + if set == nil { + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + batch := &RecordBatch{ + FirstTimestamp: timestamp, + Version: 2, + ProducerID: -1, /* No producer id */ + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + } + set = &partitionSet{recordsToSend: newDefaultRecords(batch)} + size = recordBatchOverhead + } else { + set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} + } + partitions[msg.Partition] = set + } + + set.msgs = append(set.msgs, msg) + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + // We are being conservative here to avoid having to prep encode the record + size += maximumRecordOverhead + rec := &Record{ + Key: key, + Value: val, + TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp), + } + size += len(key) + len(val) + if len(msg.Headers) > 0 { + rec.Headers = make([]*RecordHeader, len(msg.Headers)) + for i := range msg.Headers { + rec.Headers[i] = &msg.Headers[i] + size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 + } + } + set.recordsToSend.RecordBatch.addRecord(rec) + } else { + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + msgToSend.Timestamp = timestamp + msgToSend.Version = 1 + } + set.recordsToSend.MsgSet.addMessage(msgToSend) + size = producerMessageOverhead + len(key) + len(val) + } + + set.bufferBytes += size + ps.bufferBytes += size + ps.bufferCount++ + + return nil +} + +func (ps *produceSet) buildRequest() *ProduceRequest { + req := &ProduceRequest{ + RequiredAcks: ps.parent.conf.Producer.RequiredAcks, + Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 2 + } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + req.Version = 3 + } + + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + if req.Version >= 3 { + // If the API version we're hitting is 3 or greater, we need to calculate + // offsets for each record in the batch relative to FirstOffset. + // Additionally, we must set LastOffsetDelta to the value of the last offset + // in the batch. Since the OffsetDelta of the first record is 0, we know that the + // final record of any batch will have an offset of (# of records in batch) - 1. + // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets + // under the RecordBatch section for details.) 
+ rb := set.recordsToSend.RecordBatch + if len(rb.Records) > 0 { + rb.LastOffsetDelta = int32(len(rb.Records) - 1) + for i, record := range rb.Records { + record.OffsetDelta = int64(i) + } + } + + req.AddBatch(topic, partition, rb) + continue + } + if ps.parent.conf.Producer.Compression == CompressionNone { + req.AddSet(topic, partition, set.recordsToSend.MsgSet) + } else { + // When compression is enabled, the entire set for each partition is compressed + // and sent as the payload of a single fake "message" with the appropriate codec + // set and no key. When the server sees a message with a compression codec, it + // decompresses the payload and treats the result as its message set. + + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + // If our version is 0.10 or later, assign relative offsets + // to the inner messages. This lets the broker avoid + // recompressing the message set. + // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets + // for details on relative offsets.) + for i, msg := range set.recordsToSend.MsgSet.Messages { + msg.Offset = int64(i) + } + } + payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. + panic(err) + } + compMsg := &Message{ + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + Key: nil, + Value: payload, + Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + compMsg.Version = 1 + compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp + } + req.AddMessage(topic, partition, compMsg) + } + } + } + + return req +} + +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + cb(topic, partition, set.msgs) + } + } +} + +func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { + if ps.msgs[topic] == nil { + return nil + } + set := ps.msgs[topic][partition] + if set == nil { + return nil + } + ps.bufferBytes -= set.bufferBytes + ps.bufferCount -= len(set.msgs) + delete(ps.msgs[topic], partition) + return set.msgs +} + +func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + version := 1 + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + + switch { + // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. + case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): + return true + // Would we overflow the size-limit of a compressed message-batch for this partition? + case ps.parent.conf.Producer.Compression != CompressionNone && + ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: + return true + // Would we overflow simply in number of messages? 
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: + return true + default: + return false + } +} + +func (ps *produceSet) readyToFlush() bool { + switch { + // If we don't have any messages, nothing else matters + case ps.empty(): + return false + // If all three config values are 0, we always flush as-fast-as-possible + case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: + return true + // If we've passed the message trigger-point + case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: + return true + // If we've passed the byte trigger-point + case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: + return true + default: + return false + } +} + +func (ps *produceSet) empty() bool { + return ps.bufferCount == 0 +} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go new file mode 100644 index 0000000000..23045e7d33 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -0,0 +1,324 @@ +package sarama + +import ( + "encoding/binary" + "math" +) + +var errInvalidArrayLength = PacketDecodingError{"invalid array length"} +var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} +var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"} +var errInvalidStringLength = PacketDecodingError{"invalid string length"} +var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} +var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errInvalidBool = PacketDecodingError{"invalid bool"} + +type realDecoder struct { + raw []byte + off int + stack []pushDecoder +} + +// primitives + +func (rd *realDecoder) getInt8() (int8, error) { + if rd.remaining() < 1 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int8(rd.raw[rd.off]) + rd.off++ + return tmp, nil +} + +func (rd *realDecoder) getInt16() (int16, error) { + if rd.remaining() < 2 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) + rd.off += 2 + return tmp, nil +} + +func (rd *realDecoder) getInt32() (int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + return tmp, nil +} + +func (rd *realDecoder) getInt64() (int64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + +func (rd *realDecoder) getVarint() (int64, error) { + tmp, n := binary.Varint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + if n < 0 { + rd.off -= n + return -1, errVarintOverflow + } + rd.off += n + return tmp, nil +} + +func (rd *realDecoder) getArrayLength() (int, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) + rd.off += 4 + if tmp > rd.remaining() { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } else if tmp > 2*math.MaxUint16 { + return -1, errInvalidArrayLength + } + return tmp, nil +} + +func (rd *realDecoder) getBool() (bool, error) { + b, err := rd.getInt8() + if 
err != nil || b == 0 { + return false, err + } + if b != 1 { + return false, errInvalidBool + } + return true, nil +} + +// collections + +func (rd *realDecoder) getBytes() ([]byte, error) { + tmp, err := rd.getInt32() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getVarintBytes() ([]byte, error) { + tmp, err := rd.getVarint() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getStringLength() (int, error) { + length, err := rd.getInt16() + if err != nil { + return 0, err + } + + n := int(length) + + switch { + case n < -1: + return 0, errInvalidStringLength + case n > rd.remaining(): + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + return n, nil +} + +func (rd *realDecoder) getString() (string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return "", err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getNullableString() (*string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return &tmpStr, err +} + +func (rd *realDecoder) getInt32Array() ([]int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 4*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int32, n) + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt64Array() ([]int64, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 8*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int64, n) + for i := range ret { + ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + } + return ret, nil +} + +func (rd *realDecoder) getStringArray() ([]string, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]string, n) + for i := range ret { + str, err := rd.getString() + if err != nil { + return nil, err + } + + ret[i] = str + } + return ret, nil +} + +// subsets + +func (rd *realDecoder) remaining() int { + return len(rd.raw) - rd.off +} + +func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + buf, err := rd.getRawBytes(length) + if err != nil { + return nil, err + } + return &realDecoder{raw: buf}, nil +} + +func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { + if length < 0 { + return nil, errInvalidByteSliceLength + } else if length > rd.remaining() { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + start := rd.off + rd.off += length + return rd.raw[start:rd.off], nil +} + +func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { + 
if rd.remaining() < offset+length { + return nil, ErrInsufficientData + } + off := rd.off + offset + return &realDecoder{raw: rd.raw[off : off+length]}, nil +} + +// stacks + +func (rd *realDecoder) push(in pushDecoder) error { + in.saveOffset(rd.off) + + var reserve int + if dpd, ok := in.(dynamicPushDecoder); ok { + if err := dpd.decode(rd); err != nil { + return err + } + } else { + reserve = in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } + } + + rd.stack = append(rd.stack, in) + + rd.off += reserve + + return nil +} + +func (rd *realDecoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := rd.stack[len(rd.stack)-1] + rd.stack = rd.stack[:len(rd.stack)-1] + + return in.check(rd.off, rd.raw) +} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go new file mode 100644 index 0000000000..3c75387f77 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -0,0 +1,156 @@ +package sarama + +import ( + "encoding/binary" + + "github.com/rcrowley/go-metrics" +) + +type realEncoder struct { + raw []byte + off int + stack []pushEncoder + registry metrics.Registry +} + +// primitives + +func (re *realEncoder) putInt8(in int8) { + re.raw[re.off] = byte(in) + re.off++ +} + +func (re *realEncoder) putInt16(in int16) { + binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) + re.off += 2 +} + +func (re *realEncoder) putInt32(in int32) { + binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) + re.off += 4 +} + +func (re *realEncoder) putInt64(in int64) { + binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) + re.off += 8 +} + +func (re *realEncoder) putVarint(in int64) { + re.off += binary.PutVarint(re.raw[re.off:], in) +} + +func (re *realEncoder) putArrayLength(in int) error { + re.putInt32(int32(in)) + return nil +} + +func (re *realEncoder) putBool(in bool) { + if in { + re.putInt8(1) + return + } + re.putInt8(0) +} + +// collection + +func (re *realEncoder) putRawBytes(in []byte) error { + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putBytes(in []byte) error { + if in == nil { + re.putInt32(-1) + return nil + } + re.putInt32(int32(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putVarintBytes(in []byte) error { + if in == nil { + re.putVarint(-1) + return nil + } + re.putVarint(int64(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putString(in string) error { + re.putInt16(int16(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putNullableString(in *string) error { + if in == nil { + re.putInt16(-1) + return nil + } + return re.putString(*in) +} + +func (re *realEncoder) putStringArray(in []string) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, val := range in { + if err := re.putString(val); err != nil { + return err + } + } + + return nil +} + +func (re *realEncoder) putInt32Array(in []int32) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt64Array(in []int64) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt64(val) + } + return nil +} + +func (re *realEncoder) offset() int { + return re.off +} + +// stacks + +func (re *realEncoder) push(in pushEncoder) { + 
in.saveOffset(re.off) + re.off += in.reserveLength() + re.stack = append(re.stack, in) +} + +func (re *realEncoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := re.stack[len(re.stack)-1] + re.stack = re.stack[:len(re.stack)-1] + + return in.run(re.off, re.raw) +} + +// we do record metrics during the real encoder pass +func (re *realEncoder) metricRegistry() metrics.Registry { + return re.registry +} diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go new file mode 100644 index 0000000000..cded308cf0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record.go @@ -0,0 +1,113 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +const ( + controlMask = 0x20 + maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 +) + +type RecordHeader struct { + Key []byte + Value []byte +} + +func (h *RecordHeader) encode(pe packetEncoder) error { + if err := pe.putVarintBytes(h.Key); err != nil { + return err + } + return pe.putVarintBytes(h.Value) +} + +func (h *RecordHeader) decode(pd packetDecoder) (err error) { + if h.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if h.Value, err = pd.getVarintBytes(); err != nil { + return err + } + return nil +} + +type Record struct { + Attributes int8 + TimestampDelta time.Duration + OffsetDelta int64 + Key []byte + Value []byte + Headers []*RecordHeader + + length varintLengthField +} + +func (r *Record) encode(pe packetEncoder) error { + pe.push(&r.length) + pe.putInt8(r.Attributes) + pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) + pe.putVarint(r.OffsetDelta) + if err := pe.putVarintBytes(r.Key); err != nil { + return err + } + if err := pe.putVarintBytes(r.Value); err != nil { + return err + } + pe.putVarint(int64(len(r.Headers))) + + for _, h := range r.Headers { + if err := h.encode(pe); err != nil { + return err + } + } + + return pe.pop() +} + +func (r *Record) decode(pd packetDecoder) (err error) { + if err = pd.push(&r.length); err != nil { + return err + } + + if r.Attributes, err = pd.getInt8(); err != nil { + return err + } + + timestamp, err := pd.getVarint() + if err != nil { + return err + } + r.TimestampDelta = time.Duration(timestamp) * time.Millisecond + + if r.OffsetDelta, err = pd.getVarint(); err != nil { + return err + } + + if r.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if r.Value, err = pd.getVarintBytes(); err != nil { + return err + } + + numHeaders, err := pd.getVarint() + if err != nil { + return err + } + + if numHeaders >= 0 { + r.Headers = make([]*RecordHeader, numHeaders) + } + for i := int64(0); i < numHeaders; i++ { + hdr := new(RecordHeader) + if err := hdr.decode(pd); err != nil { + return err + } + r.Headers[i] = hdr + } + + return pd.pop() +} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go new file mode 100644 index 0000000000..845318aa34 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -0,0 +1,268 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "time" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +const recordBatchOverhead = 49 + +type recordsArray []*Record + +func (e recordsArray) encode(pe packetEncoder) error { + for _, r := range e { + if err := r.encode(pe); err != nil { + return err + } + } + return nil +} + +func (e recordsArray) decode(pd packetDecoder) error { + for i := range e { + rec := 
&Record{}
+		if err := rec.decode(pd); err != nil {
+			return err
+		}
+		e[i] = rec
+	}
+	return nil
+}
+
+type RecordBatch struct {
+	FirstOffset           int64
+	PartitionLeaderEpoch  int32
+	Version               int8
+	Codec                 CompressionCodec
+	CompressionLevel      int
+	Control               bool
+	LastOffsetDelta       int32
+	FirstTimestamp        time.Time
+	MaxTimestamp          time.Time
+	ProducerID            int64
+	ProducerEpoch         int16
+	FirstSequence         int32
+	Records               []*Record
+	PartialTrailingRecord bool
+
+	compressedRecords []byte
+	recordsLen        int // uncompressed records size
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+	if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+	}
+	pe.putInt64(b.FirstOffset)
+	pe.push(&lengthField{})
+	pe.putInt32(b.PartitionLeaderEpoch)
+	pe.putInt8(b.Version)
+	pe.push(newCRC32Field(crcCastagnoli))
+	pe.putInt16(b.computeAttributes())
+	pe.putInt32(b.LastOffsetDelta)
+
+	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	pe.putInt64(b.ProducerID)
+	pe.putInt16(b.ProducerEpoch)
+	pe.putInt32(b.FirstSequence)
+
+	if err := pe.putArrayLength(len(b.Records)); err != nil {
+		return err
+	}
+
+	if b.compressedRecords == nil {
+		if err := b.encodeRecords(pe); err != nil {
+			return err
+		}
+	}
+	if err := pe.putRawBytes(b.compressedRecords); err != nil {
+		return err
+	}
+
+	if err := pe.pop(); err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+	if b.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	batchLen, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if b.Version, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+		return err
+	}
+
+	attributes, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+	b.Control = attributes&controlMask == controlMask
+
+	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if b.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.FirstSequence, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	numRecs, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if numRecs >= 0 {
+		b.Records = make([]*Record, numRecs)
+	}
+
+	bufSize := int(batchLen) - recordBatchOverhead
+	recBuffer, err := pd.getRawBytes(bufSize)
+	if err != nil {
+		if err == ErrInsufficientData {
+			b.PartialTrailingRecord = true
+			b.Records = nil
+			return nil
+		}
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	switch b.Codec {
+	case CompressionNone:
+	case CompressionGZIP:
+		reader, err := gzip.NewReader(bytes.NewReader(recBuffer))
+		if err != nil {
+			return err
+		}
+		if recBuffer, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+	case CompressionSnappy:
+		if recBuffer, err = snappy.Decode(recBuffer); err != nil {
+			return err
+		}
+	case CompressionLZ4:
+		reader := lz4.NewReader(bytes.NewReader(recBuffer))
+		if recBuffer, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+	
default: + return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)} + } + + b.recordsLen = len(recBuffer) + err = decode(recBuffer, recordsArray(b.Records)) + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err +} + +func (b *RecordBatch) encodeRecords(pe packetEncoder) error { + var raw []byte + var err error + if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil { + return err + } + b.recordsLen = len(raw) + + switch b.Codec { + case CompressionNone: + b.compressedRecords = raw + case CompressionGZIP: + var buf bytes.Buffer + var writer *gzip.Writer + if b.CompressionLevel != CompressionLevelDefault { + writer, err = gzip.NewWriterLevel(&buf, b.CompressionLevel) + if err != nil { + return err + } + } else { + writer = gzip.NewWriter(&buf) + } + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + case CompressionSnappy: + b.compressedRecords = snappy.Encode(raw) + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + + return nil +} + +func (b *RecordBatch) computeAttributes() int16 { + attr := int16(b.Codec) & int16(compressionCodecMask) + if b.Control { + attr |= controlMask + } + return attr +} + +func (b *RecordBatch) addRecord(r *Record) { + b.Records = append(b.Records, r) +} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go new file mode 100644 index 0000000000..301055bb07 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/records.go @@ -0,0 +1,173 @@ +package sarama + +import "fmt" + +const ( + unknownRecords = iota + legacyRecords + defaultRecords + + magicOffset = 16 + magicLength = 1 +) + +// Records implements a union type containing either a RecordBatch or a legacy MessageSet. +type Records struct { + recordsType int + MsgSet *MessageSet + RecordBatch *RecordBatch +} + +func newLegacyRecords(msgSet *MessageSet) Records { + return Records{recordsType: legacyRecords, MsgSet: msgSet} +} + +func newDefaultRecords(batch *RecordBatch) Records { + return Records{recordsType: defaultRecords, RecordBatch: batch} +} + +// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil. +// The first return value indicates whether both fields are nil (and the type is not set). +// If both fields are not nil, it returns an error. 
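The union is driven by the two constructors above; accessors such as numRecords and isControl (defined below) then dispatch on the stored type. A small illustrative use, where the wrapper function is invented for this example:

```go
// recordsUnionExample exercises both arms of the Records union.
// Assumes `import "fmt"` in scope.
func recordsUnionExample() error {
	legacy := newLegacyRecords(new(MessageSet))
	if n, err := legacy.numRecords(); err != nil || n != 0 {
		return fmt.Errorf("expected empty legacy message set, got %d (%v)", n, err)
	}

	batch := newDefaultRecords(&RecordBatch{Version: 2})
	if ctrl, err := batch.isControl(); err != nil || ctrl {
		return fmt.Errorf("plain record batch misreported as control (%v)", err)
	}
	return nil
}
```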
+func (r *Records) setTypeFromFields() (bool, error) { + if r.MsgSet == nil && r.RecordBatch == nil { + return true, nil + } + if r.MsgSet != nil && r.RecordBatch != nil { + return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown") + } + r.recordsType = defaultRecords + if r.MsgSet != nil { + r.recordsType = legacyRecords + } + return false, nil +} + +func (r *Records) encode(pe packetEncoder) error { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return nil + } + return r.MsgSet.encode(pe) + case defaultRecords: + if r.RecordBatch == nil { + return nil + } + return r.RecordBatch.encode(pe) + } + + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) setTypeFromMagic(pd packetDecoder) error { + magic, err := magicValue(pd) + if err != nil { + return err + } + + r.recordsType = defaultRecords + if magic < 2 { + r.recordsType = legacyRecords + } + + return nil +} + +func (r *Records) decode(pd packetDecoder) error { + if r.recordsType == unknownRecords { + if err := r.setTypeFromMagic(pd); err != nil { + return err + } + } + + switch r.recordsType { + case legacyRecords: + r.MsgSet = &MessageSet{} + return r.MsgSet.decode(pd) + case defaultRecords: + r.RecordBatch = &RecordBatch{} + return r.RecordBatch.decode(pd) + } + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) numRecords() (int, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return 0, err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return 0, nil + } + return len(r.MsgSet.Messages), nil + case defaultRecords: + if r.RecordBatch == nil { + return 0, nil + } + return len(r.RecordBatch.Records), nil + } + return 0, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isPartial() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.PartialTrailingMessage, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.PartialTrailingRecord, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isControl() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case legacyRecords: + return false, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.Control, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func magicValue(pd packetDecoder) (int8, error) { + dec, err := pd.peek(magicOffset, magicLength) + if err != nil { + return 0, err + } + + return dec.getInt8() +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go new file mode 100644 index 0000000000..4d211a14f1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/request.go @@ -0,0 +1,149 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "io" +) + +type protocolBody interface { + encoder + versionedDecoder + key() 
int16
+	version() int16
+	requiredVersion() KafkaVersion
+}
+
+type request struct {
+	correlationID int32
+	clientID      string
+	body          protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+	pe.push(&lengthField{})
+	pe.putInt16(r.body.key())
+	pe.putInt16(r.body.version())
+	pe.putInt32(r.correlationID)
+	err = pe.putString(r.clientID)
+	if err != nil {
+		return err
+	}
+	err = r.body.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+	var key int16
+	if key, err = pd.getInt16(); err != nil {
+		return err
+	}
+	var version int16
+	if version, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if r.correlationID, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.clientID, err = pd.getString(); err != nil {
+		return err
+	}
+
+	r.body = allocateBody(key, version)
+	if r.body == nil {
+		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+	}
+	return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
+	lengthBytes := make([]byte, 4)
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(lengthBytes)
+
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+	if length <= 4 || length > MaxRequestSize {
+		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(encodedReq)
+
+	req = &request{}
+	if err := decode(encodedReq, req); err != nil {
+		return nil, bytesRead, err
+	}
+	return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+	switch key {
+	case 0:
+		return &ProduceRequest{}
+	case 1:
+		return &FetchRequest{}
+	case 2:
+		return &OffsetRequest{Version: version}
+	case 3:
+		return &MetadataRequest{}
+	case 8:
+		return &OffsetCommitRequest{Version: version}
+	case 9:
+		return &OffsetFetchRequest{}
+	case 10:
+		return &FindCoordinatorRequest{}
+	case 11:
+		return &JoinGroupRequest{}
+	case 12:
+		return &HeartbeatRequest{}
+	case 13:
+		return &LeaveGroupRequest{}
+	case 14:
+		return &SyncGroupRequest{}
+	case 15:
+		return &DescribeGroupsRequest{}
+	case 16:
+		return &ListGroupsRequest{}
+	case 17:
+		return &SaslHandshakeRequest{}
+	case 18:
+		return &ApiVersionsRequest{}
+	case 19:
+		return &CreateTopicsRequest{}
+	case 20:
+		return &DeleteTopicsRequest{}
+	case 21:
+		return &DeleteRecordsRequest{}
+	case 22:
+		return &InitProducerIDRequest{}
+	case 24:
+		return &AddPartitionsToTxnRequest{}
+	case 25:
+		return &AddOffsetsToTxnRequest{}
+	case 26:
+		return &EndTxnRequest{}
+	case 28:
+		return &TxnOffsetCommitRequest{}
+	case 29:
+		return &DescribeAclsRequest{}
+	case 30:
+		return &CreateAclsRequest{}
+	case 31:
+		return &DeleteAclsRequest{}
+	case 32:
+		return &DescribeConfigsRequest{}
+	case 33:
+		return &AlterConfigsRequest{}
+	case 37:
+		return &CreatePartitionsRequest{}
+	case 42:
+		return &DeleteGroupsRequest{}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 0000000000..f3f4d27d6c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+	length        int32
+	correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+	r.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if r.length <= 4 || r.length > MaxResponseSize {
+		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+	}
+
+	r.correlationID, err = pd.getInt32()
+	return err
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 0000000000..7d5dc60d3e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,99 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
+consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
+https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
+and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.
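+
+For example, the registry carried on a Config (the conf.MetricRegistry field used by the producer code in this
+patch) can be walked with the go-metrics API; this snippet is illustrative and not part of the vendored file:
+
+	conf.MetricRegistry.Each(func(name string, i interface{}) {
+		switch m := i.(type) {
+		case metrics.Meter:
+			fmt.Printf("%s: %.1f/s (count %d)\n", name, m.Rate1(), m.Count())
+		case metrics.Histogram:
+			fmt.Printf("%s: mean %.1f, p95 %.1f\n", name, m.Mean(), m.Percentile(0.95))
+		}
+	})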
+
+Broker related metrics:
+
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| Name                                          | Type       | Description                                                   |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| incoming-byte-rate                            | meter      | Bytes/second read off all brokers                             |
+	| incoming-byte-rate-for-broker-<broker-id>     | meter      | Bytes/second read off a given broker                          |
+	| outgoing-byte-rate                            | meter      | Bytes/second written off all brokers                          |
+	| outgoing-byte-rate-for-broker-<broker-id>     | meter      | Bytes/second written off a given broker                       |
+	| request-rate                                  | meter      | Requests/second sent to all brokers                           |
+	| request-rate-for-broker-<broker-id>           | meter      | Requests/second sent to a given broker                        |
+	| request-size                                  | histogram  | Distribution of the request size in bytes for all brokers     |
+	| request-size-for-broker-<broker-id>           | histogram  | Distribution of the request size in bytes for a given broker  |
+	| request-latency-in-ms                         | histogram  | Distribution of the request latency in ms for all brokers     |
+	| request-latency-in-ms-for-broker-<broker-id>  | histogram  | Distribution of the request latency in ms for a given broker  |
+	| response-rate                                 | meter      | Responses/second received from all brokers                    |
+	| response-rate-for-broker-<broker-id>          | meter      | Responses/second received from a given broker                 |
+	| response-size                                 | histogram  | Distribution of the response size in bytes for all brokers    |
+	| response-size-for-broker-<broker-id>          | histogram  | Distribution of the response size in bytes for a given broker |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+	+--------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                        | Type       | Description                                                                          |
+	+--------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| batch-size                                  | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
+	| batch-size-for-topic-<topic>                | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
+	| record-send-rate                            | meter      | Records/second sent to all topics                                                    |
+	| record-send-rate-for-topic-<topic>          | meter      | Records/second sent to a given topic                                                 |
+	| records-per-request                         | histogram  | Distribution of the number of records sent per request for all topics                |
+	| records-per-request-for-topic-<topic>       | histogram  | Distribution of the number of records sent per request for a given topic             |
+	| compression-ratio                           | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
+	| compression-ratio-for-topic-<topic>         | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
+	+--------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+	"io/ioutil"
+	"log"
+)
+
+// Logger is the instance of a StdLogger interface that Sarama writes connection
+// management events to. By default it is set to discard all log messages via ioutil.Discard,
+// but you can set it to redirect wherever you want.
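+// For example, to surface these logs on stderr while debugging (an
+// illustrative wiring from calling code, not part of this file):
+//
+//	sarama.Logger = log.New(os.Stderr, "[sarama] ", log.LstdFlags)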
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000000..fbbc8947b2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,33 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+	Mechanism string
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Mechanism); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.Mechanism, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000000..ef290d4bc6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+	Err               KError
+	EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 0000000000..fe207080e0
--- /dev/null
+++
b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -0,0 +1,100 @@ +package sarama + +type SyncGroupRequest struct { + GroupId string + GenerationId int32 + MemberId string + GroupAssignments map[string][]byte +} + +func (r *SyncGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return err + } + for memberId, memberAssignment := range r.GroupAssignments { + if err := pe.putString(memberId); err != nil { + return err + } + if err := pe.putBytes(memberAssignment); err != nil { + return err + } + } + + return nil +} + +func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupAssignments = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + memberAssignment, err := pd.getBytes() + if err != nil { + return err + } + + r.GroupAssignments[memberId] = memberAssignment + } + + return nil +} + +func (r *SyncGroupRequest) key() int16 { + return 14 +} + +func (r *SyncGroupRequest) version() int16 { + return 0 +} + +func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { + if r.GroupAssignments == nil { + r.GroupAssignments = make(map[string][]byte) + } + + r.GroupAssignments[memberId] = memberAssignment +} + +func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { + bin, err := encode(memberAssignment, nil) + if err != nil { + return err + } + + r.AddGroupAssignment(memberId, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go new file mode 100644 index 0000000000..194b382b4a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -0,0 +1,41 @@ +package sarama + +type SyncGroupResponse struct { + Err KError + MemberAssignment []byte +} + +func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(r.MemberAssignment, assignment) + return assignment, err +} + +func (r *SyncGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putBytes(r.MemberAssignment) +} + +func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + r.MemberAssignment, err = pd.getBytes() + return +} + +func (r *SyncGroupResponse) key() int16 { + return 14 +} + +func (r *SyncGroupResponse) version() int16 { + return 0 +} + +func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go new file mode 100644 index 0000000000..dd096b6db6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -0,0 +1,164 @@ 
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+}
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+	if err := verifyProducerConfig(client.Config()); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+	sp := &syncProducer{producer: p}
+
+	sp.wg.Add(2)
+	go withRecover(sp.handleSuccesses)
+	go withRecover(sp.handleErrors)
+
+	return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+	if !config.Producer.Return.Errors {
+		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+	}
+	if !config.Producer.Return.Successes {
+		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+	}
+	return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+	oldMetadata := msg.Metadata
+	defer func() {
+		msg.Metadata = oldMetadata
+	}()
+
+	expectation := make(chan *ProducerError, 1)
+	msg.Metadata = expectation
+	sp.producer.Input() <- msg
+
+	if err := <-expectation; err != nil {
+		return -1, -1, err.Err
+	}
+
+	return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+	savedMetadata := make([]interface{}, len(msgs))
+	for i := range msgs {
+		savedMetadata[i] = msgs[i].Metadata
+	}
+	defer func() {
+		for i := range msgs {
+			msgs[i].Metadata = savedMetadata[i]
+		}
+	}()
+
+	expectations := make(chan chan *ProducerError, len(msgs))
+	go func() {
+		for _, msg := range msgs {
+			expectation := make(chan *ProducerError, 1)
+			msg.Metadata = expectation
+			sp.producer.Input() <- msg
+			expectations <- expectation
+		}
+		close(expectations)
+	}()
+
+	var errors ProducerErrors
+	for expectation := range expectations {
+		if err := <-expectation; err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+	defer sp.wg.Done()
+	for msg := range sp.producer.Successes() {
+		expectation := msg.Metadata.(chan *ProducerError)
+		expectation <- nil
+	}
+}
+
+func (sp *syncProducer) handleErrors() {
+	defer sp.wg.Done()
+	for err := range sp.producer.Errors() {
+		expectation := err.Msg.Metadata.(chan *ProducerError)
+		expectation <- err
+	}
+}
+
+func (sp *syncProducer) Close() error {
+	sp.producer.AsyncClose()
+	sp.wg.Wait()
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go
new file mode 100644
index 0000000000..372278d0bf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/timestamp.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type Timestamp struct {
+	*time.Time
+}
+
+func (t Timestamp) encode(pe packetEncoder) error {
+	timestamp := int64(-1)
+
+	if !t.Before(time.Unix(0, 0)) {
+		timestamp = t.UnixNano() / int64(time.Millisecond)
+	} else if !t.IsZero() {
+		return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
+	}
+
+	pe.putInt64(timestamp)
+	return nil
+}
+
+func (t Timestamp) decode(pd packetDecoder) error {
+	millis, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// negative timestamps are invalid; in these cases we should return
+	// a zero time
+	timestamp := time.Time{}
+	if millis >= 0 {
+		timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+	}
+
+	*t.Time = timestamp
+	return nil
+} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go new file mode 100644 index 0000000000..71e95b814c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -0,0 +1,126 @@ +package sarama + +type TxnOffsetCommitRequest struct { + TransactionalID string + GroupID string + ProducerID int64 + ProducerEpoch int16 + Topics map[string][]*PartitionOffsetMetadata +} + +func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { + if err := pe.putString(t.TransactionalID); err != nil { + return err + } + if err := pe.putString(t.GroupID); err != nil { + return err + } + pe.putInt64(t.ProducerID) + pe.putInt16(t.ProducerEpoch) + + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + for topic, partitions := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for _, partition := range partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + if t.TransactionalID, err = pd.getString(); err != nil { + return err + } + if t.GroupID, err = pd.getString(); err != nil { + return err + } + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if t.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionOffsetMetadata) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionOffsetMetadata, m) + + for j := 0; j < m; j++ { + partitionOffsetMetadata := new(PartitionOffsetMetadata) + if err := partitionOffsetMetadata.decode(pd, version); err != nil { + return err + } + t.Topics[topic][j] = partitionOffsetMetadata + } + } + + return nil +} + +func (a *TxnOffsetCommitRequest) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitRequest) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionOffsetMetadata struct { + Partition int32 + Offset int64 + Metadata *string +} + +func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt64(p.Offset) + if err := pe.putNullableString(p.Metadata); err != nil { + return err + } + + return nil +} + +func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + if p.Offset, err = pd.getInt64(); err != nil { + return err + } + if p.Metadata, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go new file mode 100644 index 0000000000..6c980f4066 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -0,0 +1,83 @@ +package sarama + +import ( + "time" +) + +type TxnOffsetCommitResponse struct { + ThrottleTime time.Duration + Topics map[string][]*PartitionError +} + +func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) + if err := 
pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + + for topic, e := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + t.Topics[topic][j] = new(PartitionError) + if err := t.Topics[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *TxnOffsetCommitResponse) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitResponse) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go new file mode 100644 index 0000000000..702e226270 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -0,0 +1,212 @@ +package sarama + +import ( + "bufio" + "fmt" + "net" + "regexp" +) + +type none struct{} + +// make []int32 sortable so we can sort partition numbers +type int32Slice []int32 + +func (slice int32Slice) Len() int { + return len(slice) +} + +func (slice int32Slice) Less(i, j int) bool { + return slice[i] < slice[j] +} + +func (slice int32Slice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func dupInt32Slice(input []int32) []int32 { + ret := make([]int32, 0, len(input)) + for _, val := range input { + ret = append(ret, val) + } + return ret +} + +func withRecover(fn func()) { + defer func() { + handler := PanicHandler + if handler != nil { + if err := recover(); err != nil { + handler(err) + } + } + }() + + fn() +} + +func safeAsyncClose(b *Broker) { + tmp := b // local var prevents clobbering in goroutine + go withRecover(func() { + if connected, _ := tmp.Connected(); connected { + if err := tmp.Close(); err != nil { + Logger.Println("Error closing broker", tmp.ID(), ":", err) + } + } + }) +} + +// Encoder is a simple interface for any type that can be encoded as an array of bytes +// in order to be sent as the key or value of a Kafka message. Length() is provided as an +// optimization, and must return the same as len() on the result of Encode(). +type Encoder interface { + Encode() ([]byte, error) + Length() int +} + +// make strings and byte slices encodable for convenience so they can be used as keys +// and/or values in kafka messages + +// StringEncoder implements the Encoder interface for Go strings so that they can be used +// as the Key or Value in a ProducerMessage. +type StringEncoder string + +func (s StringEncoder) Encode() ([]byte, error) { + return []byte(s), nil +} + +func (s StringEncoder) Length() int { + return len(s) +} + +// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used +// as the Key or Value in a ProducerMessage. 
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//   V1.IsAtLeast(V2) // false
+//   V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Effective constants defining the supported Kafka versions.
+var (
+	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
+	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
+
+	SupportedVersions = []KafkaVersion{
+		V0_8_2_0,
+		V0_8_2_1,
+		V0_8_2_2,
+		V0_9_0_0,
+		V0_9_0_1,
+		V0_10_0_0,
+		V0_10_0_1,
+		V0_10_1_0,
+		V0_10_1_1,
+		V0_10_2_0,
+		V0_10_2_1,
+		V0_11_0_0,
+		V0_11_0_1,
+		V0_11_0_2,
+		V1_0_0_0,
+		V1_1_0_0,
+	}
+	MinVersion = V0_8_2_0
+	MaxVersion = V1_1_0_0
+)
+
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	if len(s) < 5 {
+		return MinVersion, fmt.Errorf("invalid version `%s`", s)
+	}
+	var major, minor, veryMinor, patch uint
+	var err error
+	if s[0] == '0' {
+		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return MinVersion, err
+	}
+	return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
+	if !regexp.MustCompile(pattern).MatchString(s) {
+		return fmt.Errorf("invalid version `%s`", s)
+	}
+	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+	return err
+}
+
+func (v KafkaVersion) String() string {
+	if v.version[0] == 0 {
+		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+	} else {
+		return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+	}
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/.gitignore b/vendor/github.com/bsm/sarama-cluster/.gitignore
new file mode 100644 index 0000000000..88113c5b27 --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/.gitignore @@ -0,0 +1,4 @@ +*.log +*.pid +kafka*/ +vendor/ diff --git a/vendor/github.com/bsm/sarama-cluster/.travis.yml b/vendor/github.com/bsm/sarama-cluster/.travis.yml new file mode 100644 index 0000000000..d9d5efcc20 --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +go: + - 1.9.x + - 1.8.x +install: + - go get -u github.com/golang/dep/cmd/dep + - dep ensure +env: + - SCALA_VERSION=2.11 KAFKA_VERSION=0.10.1.1 + - SCALA_VERSION=2.12 KAFKA_VERSION=0.10.2.1 + - SCALA_VERSION=2.12 KAFKA_VERSION=0.11.0.1 + - SCALA_VERSION=2.12 KAFKA_VERSION=1.0.0 +script: + - make default test-race +addons: + apt: + packages: + - oracle-java8-set-default diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock new file mode 100644 index 0000000000..3ab8b6ab2c --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock @@ -0,0 +1,154 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/Shopify/sarama" + packages = ["."] + revision = "3b1b38866a79f06deddf0487d5c27ba0697ccd65" + version = "v1.15.0" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/eapache/go-resiliency" + packages = ["breaker"] + revision = "6800482f2c813e689c88b7ed3282262385011890" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/eapache/go-xerial-snappy" + packages = ["."] + revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c" + +[[projects]] + name = "github.com/eapache/queue" + packages = ["."] + revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "553a641470496b2327abcac10b36396bd98e45c9" + +[[projects]] + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "extensions/table", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types" + ] + revision = "9eda700730cba42af70d53180f9dcce9266bc2bc" + version = "v1.4.0" + +[[projects]] + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types" + ] + revision = "003f63b7f4cff3fc95357005358af2de0f5fe152" + version = "v1.3.0" + +[[projects]] + name = "github.com/pierrec/lz4" + packages = ["."] + revision = "2fcda4cb7018ce05a25959d2fe08c83e3329f169" + version = "v1.1" + +[[projects]] + name = "github.com/pierrec/xxHash" + packages = ["xxHash32"] + revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7" + version = "v0.1.1" + +[[projects]] + branch = "master" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + revision = 
"8732c616f52954686704c8645fe1a9d59e9df7c1" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "html", + "html/atom", + "html/charset" + ] + revision = "0ed95abb35c445290478a5348a7b38bb154135fd" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "3dbebcf8efb6a5011a60c2b4591c1022a759af8a" + +[[projects]] + branch = "master" + name = "golang.org/x/text" + packages = [ + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/gen", + "internal/tag", + "internal/utf8internal", + "language", + "runes", + "transform", + "unicode/cldr" + ] + revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" + +[[projects]] + branch = "v2" + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "2fa33a2d1ae87e0905ef09332bb4b3fda29179f6bcd48fd3b94070774b9e458b" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml new file mode 100644 index 0000000000..1eecfefce5 --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml @@ -0,0 +1,26 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/Shopify/sarama" + version = "^1.14.0" diff --git a/vendor/github.com/bsm/sarama-cluster/LICENSE b/vendor/github.com/bsm/sarama-cluster/LICENSE new file mode 100644 index 0000000000..127751c47a --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2017 Black Square Media Ltd + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/bsm/sarama-cluster/Makefile b/vendor/github.com/bsm/sarama-cluster/Makefile
new file mode 100644
index 0000000000..706f58ec17
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/Makefile
@@ -0,0 +1,35 @@
+SCALA_VERSION?= 2.12
+KAFKA_VERSION?= 1.0.0
+KAFKA_DIR= kafka_$(SCALA_VERSION)-$(KAFKA_VERSION)
+KAFKA_SRC= https://archive.apache.org/dist/kafka/$(KAFKA_VERSION)/$(KAFKA_DIR).tgz
+KAFKA_ROOT= testdata/$(KAFKA_DIR)
+PKG=$(shell go list ./... | grep -v vendor)
+
+default: vet test
+
+vet:
+	go vet $(PKG)
+
+test: testdeps
+	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60
+
+test-verbose: testdeps
+	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v
+
+test-race: testdeps
+	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v -race
+
+testdeps: $(KAFKA_ROOT)
+
+doc: README.md
+
+.PHONY: test testdeps vet doc
+
+# ---------------------------------------------------------------------
+
+$(KAFKA_ROOT):
+	@mkdir -p $(dir $@)
+	cd $(dir $@) && curl -sSL $(KAFKA_SRC) | tar xz
+
+README.md: README.md.tpl $(wildcard *.go)
+	becca -package $(subst $(GOPATH)/src/,,$(PWD))
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md b/vendor/github.com/bsm/sarama-cluster/README.md
new file mode 100644
index 0000000000..ebcd755dad
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/README.md
@@ -0,0 +1,151 @@
+# Sarama Cluster
+
+[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
+[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
+[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+
+Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
+
+## Documentation
+
+Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
+
+## Examples
+
+Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) of multiple
+topics and partitions are all passed to a single channel:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/signal"
+
+	cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {
+
+	// init (custom) config, enable errors and notifications
+	config := cluster.NewConfig()
+	config.Consumer.Return.Errors = true
+	config.Group.Return.Notifications = true
+
+	// init consumer
+	brokers := []string{"127.0.0.1:9092"}
+	topics := []string{"my_topic", "other_topic"}
+	consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
+	if err != nil {
+		panic(err)
+	}
+	defer consumer.Close()
+
+	// trap SIGINT to trigger a shutdown.
+ signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + + // consume errors + go func() { + for err := range consumer.Errors() { + log.Printf("Error: %s\n", err.Error()) + } + }() + + // consume notifications + go func() { + for ntf := range consumer.Notifications() { + log.Printf("Rebalanced: %+v\n", ntf) + } + }() + + // consume messages, watch signals + for { + select { + case msg, ok := <-consumer.Messages(): + if ok { + fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value) + consumer.MarkOffset(msg, "") // mark message as processed + } + case <-signals: + return + } + } +} +``` + +Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level +consumers: + +```go +package main + +import ( + "fmt" + "log" + "os" + "os/signal" + + cluster "github.com/bsm/sarama-cluster" +) + +func main() { + + // init (custom) config, set mode to ConsumerModePartitions + config := cluster.NewConfig() + config.Group.Mode = cluster.ConsumerModePartitions + + // init consumer + brokers := []string{"127.0.0.1:9092"} + topics := []string{"my_topic", "other_topic"} + consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config) + if err != nil { + panic(err) + } + defer consumer.Close() + + // trap SIGINT to trigger a shutdown. + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + + // consume partitions + for { + select { + case part, ok := <-consumer.Partitions(): + if !ok { + return + } + + // start a separate goroutine to consume messages + go func(pc cluster.PartitionConsumer) { + for msg := range pc.Messages() { + fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value) + consumer.MarkOffset(msg, "") // mark message as processed + } + }(part) + case <-signals: + return + } + } +} +``` + +## Running tests + +You need to install Ginkgo & Gomega to run tests. Please see +http://onsi.github.io/ginkgo for more details. + +To run tests, call: + + $ make test + +## Troubleshooting + +### Consumer not receiving any messages? + +By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that in the event that a brand new consumer is created, and it has never committed any offsets to kafka, it will only receive messages starting from the message after the current one that was written. + +If you wish to receive all messages (from the start of all messages in the topic) in the event that a consumer does not have any offsets committed to kafka, you need to set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`. 
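Aside, not part of the vendored README: the fix described in the troubleshooting entry above is a single config line. A minimal sketch, assuming a placeholder broker address and topic name; only the `Config.Consumer.Offsets.Initial` assignment differs from the consumer examples earlier in the README:

```go
package main

import (
	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// Placeholder broker/topic names; illustrative only. cluster.Config
	// embeds sarama.Config, so the sarama consumer options are reachable
	// directly on it.
	config := cluster.NewConfig()
	config.Consumer.Offsets.Initial = sarama.OffsetOldest // default is sarama.OffsetNewest

	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, config)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()
	// consume from consumer.Messages() as shown in the examples above
}
```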
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md.tpl b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
new file mode 100644
index 0000000000..5f63a690a3
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
@@ -0,0 +1,67 @@
+# Sarama Cluster
+
+[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
+[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
+[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+
+Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
+
+## Documentation
+
+Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
+
+## Examples
+
+Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) of multiple
+topics and partitions are all passed to a single channel:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/signal"
+
+	cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {{ "ExampleConsumer" | code }}
+```
+
+Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level
+consumers:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/signal"
+
+	cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {{ "ExampleConsumer_Partitions" | code }}
+```
+
+## Running tests
+
+You need to install Ginkgo & Gomega to run tests. Please see
+http://onsi.github.io/ginkgo for more details.
+
+To run tests, call:
+
+    $ make test
+
+## Troubleshooting
+
+### Consumer not receiving any messages?
+
+By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that in the event that a brand new consumer is created, and it has never committed any offsets to kafka, it will only receive messages starting from the message after the current one that was written.
+
+If you wish to receive all messages (from the start of all messages in the topic) in the event that a consumer does not have any offsets committed to kafka, you need to set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
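Aside, not part of the vendored sources: the cluster.go file vendored below declares two partition assignment strategies, and balancer.go implements them. Selecting one is a single line on the cluster config; a minimal sketch, with the C1/C2 layouts taken from the strategy doc comments:

```go
package main

import cluster "github.com/bsm/sarama-cluster"

func main() {
	// Hypothetical snippet: switch the group from the default range
	// strategy (C1: [0 1 2], C2: [3 4 5]) to round-robin assignment
	// (C1: [0 2 4], C2: [1 3 5]) for six partitions and two members.
	config := cluster.NewConfig()
	config.Group.PartitionStrategy = cluster.StrategyRoundRobin
	_ = config // pass to cluster.NewConsumer as in the README examples above
}
```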
diff --git a/vendor/github.com/bsm/sarama-cluster/balancer.go b/vendor/github.com/bsm/sarama-cluster/balancer.go
new file mode 100644
index 0000000000..0f9b445ee4
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/balancer.go
@@ -0,0 +1,174 @@
+package cluster
+
+import (
+	"math"
+	"sort"
+
+	"github.com/Shopify/sarama"
+)
+
+// NotificationType defines the type of notification
+type NotificationType uint8
+
+// String describes the notification type
+func (t NotificationType) String() string {
+	switch t {
+	case RebalanceStart:
+		return "rebalance start"
+	case RebalanceOK:
+		return "rebalance OK"
+	case RebalanceError:
+		return "rebalance error"
+	}
+	return "unknown"
+}
+
+const (
+	UnknownNotification NotificationType = iota
+	RebalanceStart
+	RebalanceOK
+	RebalanceError
+)
+
+// Notification is a state event emitted by the consumers on rebalance
+type Notification struct {
+	// Type exposes the notification type
+	Type NotificationType
+
+	// Claimed contains topic/partitions that were claimed by this rebalance cycle
+	Claimed map[string][]int32
+
+	// Released contains topic/partitions that were released as part of this rebalance cycle
+	Released map[string][]int32
+
+	// Current contains topic/partitions that are currently claimed by the consumer
+	Current map[string][]int32
+}
+
+func newNotification(current map[string][]int32) *Notification {
+	return &Notification{
+		Type:    RebalanceStart,
+		Current: current,
+	}
+}
+
+func (n *Notification) success(current map[string][]int32) *Notification {
+	o := &Notification{
+		Type:     RebalanceOK,
+		Claimed:  make(map[string][]int32),
+		Released: make(map[string][]int32),
+		Current:  current,
+	}
+	for topic, partitions := range current {
+		o.Claimed[topic] = int32Slice(partitions).Diff(int32Slice(n.Current[topic]))
+	}
+	for topic, partitions := range n.Current {
+		o.Released[topic] = int32Slice(partitions).Diff(int32Slice(current[topic]))
+	}
+	return o
+}
+
+// --------------------------------------------------------------------
+
+type topicInfo struct {
+	Partitions []int32
+	MemberIDs  []string
+}
+
+func (info topicInfo) Perform(s Strategy) map[string][]int32 {
+	if s == StrategyRoundRobin {
+		return info.RoundRobin()
+	}
+	return info.Ranges()
+}
+
+func (info topicInfo) Ranges() map[string][]int32 {
+	sort.Strings(info.MemberIDs)
+
+	mlen := len(info.MemberIDs)
+	plen := len(info.Partitions)
+	res := make(map[string][]int32, mlen)
+
+	for pos, memberID := range info.MemberIDs {
+		n, i := float64(plen)/float64(mlen), float64(pos)
+		min := int(math.Floor(i*n + 0.5))
+		max := int(math.Floor((i+1)*n + 0.5))
+		sub := info.Partitions[min:max]
+		if len(sub) > 0 {
+			res[memberID] = sub
+		}
+	}
+	return res
+}
+
+func (info topicInfo) RoundRobin() map[string][]int32 {
+	sort.Strings(info.MemberIDs)
+
+	mlen := len(info.MemberIDs)
+	res := make(map[string][]int32, mlen)
+	for i, pnum := range info.Partitions {
+		memberID := info.MemberIDs[i%mlen]
+		res[memberID] = append(res[memberID], pnum)
+	}
+	return res
+}
+
+// --------------------------------------------------------------------
+
+type balancer struct {
+	client sarama.Client
+	topics map[string]topicInfo
+}
+
+func newBalancerFromMeta(client sarama.Client, members map[string]sarama.ConsumerGroupMemberMetadata) (*balancer, error) {
+	balancer := newBalancer(client)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			if err := balancer.Topic(topic, memberID); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return balancer, nil
+}
+
+func newBalancer(client
sarama.Client) *balancer {
+	return &balancer{
+		client: client,
+		topics: make(map[string]topicInfo),
+	}
+}
+
+func (r *balancer) Topic(name string, memberID string) error {
+	topic, ok := r.topics[name]
+	if !ok {
+		nums, err := r.client.Partitions(name)
+		if err != nil {
+			return err
+		}
+		topic = topicInfo{
+			Partitions: nums,
+			MemberIDs:  make([]string, 0, 1),
+		}
+	}
+	topic.MemberIDs = append(topic.MemberIDs, memberID)
+	r.topics[name] = topic
+	return nil
+}
+
+func (r *balancer) Perform(s Strategy) map[string]map[string][]int32 {
+	if r == nil {
+		return nil
+	}
+
+	res := make(map[string]map[string][]int32, 1)
+	for topic, info := range r.topics {
+		for memberID, partitions := range info.Perform(s) {
+			if _, ok := res[memberID]; !ok {
+				res[memberID] = make(map[string][]int32, 1)
+			}
+			res[memberID][topic] = partitions
+		}
+	}
+	return res
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/client.go b/vendor/github.com/bsm/sarama-cluster/client.go
new file mode 100644
index 0000000000..42ffb30c01
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/client.go
@@ -0,0 +1,50 @@
+package cluster
+
+import (
+	"errors"
+	"sync/atomic"
+
+	"github.com/Shopify/sarama"
+)
+
+var errClientInUse = errors.New("cluster: client is already used by another consumer")
+
+// Client is a group client
+type Client struct {
+	sarama.Client
+	config Config
+
+	inUse uint32
+}
+
+// NewClient creates a new client instance
+func NewClient(addrs []string, config *Config) (*Client, error) {
+	if config == nil {
+		config = NewConfig()
+	}
+
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	client, err := sarama.NewClient(addrs, &config.Config)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{Client: client, config: *config}, nil
+}
+
+// ClusterConfig returns the cluster configuration.
+func (c *Client) ClusterConfig() *Config {
+	cfg := c.config
+	return &cfg
+}
+
+func (c *Client) claim() bool {
+	return atomic.CompareAndSwapUint32(&c.inUse, 0, 1)
+}
+
+func (c *Client) release() {
+	atomic.CompareAndSwapUint32(&c.inUse, 1, 0)
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/cluster.go b/vendor/github.com/bsm/sarama-cluster/cluster.go
new file mode 100644
index 0000000000..adcf0e9c1c
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/cluster.go
@@ -0,0 +1,25 @@
+package cluster
+
+// Strategy for partition to consumer assignment
+type Strategy string
+
+const (
+	// StrategyRange is the default and assigns partition ranges to consumers.
+	// Example with six partitions and two consumers:
+	//   C1: [0, 1, 2]
+	//   C2: [3, 4, 5]
+	StrategyRange Strategy = "range"
+
+	// StrategyRoundRobin assigns partitions by alternating over consumers.
+	// Example with six partitions and two consumers:
+	//   C1: [0, 2, 4]
+	//   C2: [1, 3, 5]
+	StrategyRoundRobin Strategy = "roundrobin"
+)
+
+// Error instances are wrappers for internal errors with a context and
+// may be returned through the consumer's Errors() channel
+type Error struct {
+	Ctx string
+	error
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/config.go b/vendor/github.com/bsm/sarama-cluster/config.go
new file mode 100644
index 0000000000..084b835f71
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/config.go
@@ -0,0 +1,146 @@
+package cluster
+
+import (
+	"regexp"
+	"time"
+
+	"github.com/Shopify/sarama"
+)
+
+var minVersion = sarama.V0_9_0_0
+
+type ConsumerMode uint8
+
+const (
+	ConsumerModeMultiplex ConsumerMode = iota
+	ConsumerModePartitions
+)
+
+// Config extends sarama.Config with a Group-specific namespace
+type Config struct {
+	sarama.Config
+
+	// Group is the namespace for group management properties
+	Group struct {
+
+		// The strategy to use for the allocation of partitions to consumers (defaults to StrategyRange)
+		PartitionStrategy Strategy
+
+		// By default, messages and errors from the subscribed topics and partitions are all multiplexed and
+		// made available through the consumer's Messages() and Errors() channels.
+		//
+		// Users who require low-level access can enable ConsumerModePartitions where individual partitions
+		// are exposed on the Partitions() channel. Messages and errors must then be consumed on the partitions
+		// themselves.
+		Mode ConsumerMode
+
+		Offsets struct {
+			Retry struct {
+				// The number of retries when committing offsets (defaults to 3).
+				Max int
+			}
+			Synchronization struct {
+				// The duration allowed for other clients to commit their offsets before resumption in this client, e.g. during a rebalance
+				// NewConfig sets this to the Consumer.MaxProcessingTime duration of the Sarama configuration
+				DwellTime time.Duration
+			}
+		}
+
+		Session struct {
+			// The allowed session timeout for registered consumers (defaults to 30s).
+			// Must be within the allowed server range.
+			Timeout time.Duration
+		}
+
+		Heartbeat struct {
+			// Interval between each heartbeat (defaults to 3s). It should be no more
+			// than 1/3rd of the Group.Session.Timeout setting
+			Interval time.Duration
+		}
+
+		// Return specifies which group channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock.
+		Return struct {
+			// If enabled, rebalance notification will be returned on the
+			// Notifications channel (default disabled).
+			Notifications bool
+		}
+
+		Topics struct {
+			// An additional whitelist of topics to subscribe to.
+			Whitelist *regexp.Regexp
+			// An additional blacklist of topics to avoid. If set, this takes precedence over
+			// the Whitelist setting.
+			Blacklist *regexp.Regexp
+		}
+
+		Member struct {
+			// Custom metadata to include when joining the group. The user data for all joined members
+			// can be retrieved by sending a DescribeGroupRequest to the broker that is the
+			// coordinator for the group.
+			UserData []byte
+		}
+	}
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{
+		Config: *sarama.NewConfig(),
+	}
+	c.Group.PartitionStrategy = StrategyRange
+	c.Group.Offsets.Retry.Max = 3
+	c.Group.Offsets.Synchronization.DwellTime = c.Consumer.MaxProcessingTime
+	c.Group.Session.Timeout = 30 * time.Second
+	c.Group.Heartbeat.Interval = 3 * time.Second
+	c.Config.Version = minVersion
+	return c
+}
+
+// Validate checks a Config instance. It will return a
+// sarama.ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+	if c.Group.Heartbeat.Interval%time.Millisecond != 0 {
+		sarama.Logger.Println("Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Group.Session.Timeout%time.Millisecond != 0 {
+		sarama.Logger.Println("Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Group.PartitionStrategy != StrategyRange && c.Group.PartitionStrategy != StrategyRoundRobin {
+		sarama.Logger.Println("Group.PartitionStrategy is not supported; range will be assumed.")
+	}
+	if !c.Version.IsAtLeast(minVersion) {
+		sarama.Logger.Println("Version is not supported; 0.9.0.0 will be assumed.")
+		c.Version = minVersion
+	}
+	if err := c.Config.Validate(); err != nil {
+		return err
+	}
+
+	// validate the Group values
+	switch {
+	case c.Group.Offsets.Retry.Max < 0:
+		return sarama.ConfigurationError("Group.Offsets.Retry.Max must be >= 0")
+	case c.Group.Offsets.Synchronization.DwellTime <= 0:
+		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be > 0")
+	case c.Group.Offsets.Synchronization.DwellTime > 10*time.Minute:
+		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be <= 10m")
+	case c.Group.Heartbeat.Interval <= 0:
+		return sarama.ConfigurationError("Group.Heartbeat.Interval must be > 0")
+	case c.Group.Session.Timeout <= 0:
+		return sarama.ConfigurationError("Group.Session.Timeout must be > 0")
+	case !c.Metadata.Full && c.Group.Topics.Whitelist != nil:
+		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Whitelist is used")
+	case !c.Metadata.Full && c.Group.Topics.Blacklist != nil:
+		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Blacklist is used")
+	}
+
+	// ensure offset is correct
+	switch c.Consumer.Offsets.Initial {
+	case sarama.OffsetOldest, sarama.OffsetNewest:
+	default:
+		return sarama.ConfigurationError("Consumer.Offsets.Initial must be either OffsetOldest or OffsetNewest")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/consumer.go b/vendor/github.com/bsm/sarama-cluster/consumer.go
new file mode 100644
index 0000000000..13500cc8e7
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/consumer.go
@@ -0,0 +1,924 @@
+package cluster
+
+import (
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Shopify/sarama"
+)
+
+// Consumer is a cluster group consumer
+type Consumer struct {
+	client    *Client
+	ownClient bool
+
+	consumer sarama.Consumer
+	subs     *partitionMap
+
+	consumerID string
+	groupID    string
+
+	memberID     string
+	generationID int32
+	membershipMu sync.RWMutex
+
+	coreTopics  []string
+	extraTopics []string
+
+	dying, dead chan none
+	closeOnce   sync.Once
+
+	consuming     int32
+	messages      chan *sarama.ConsumerMessage
+	errors        chan error
+	partitions    chan PartitionConsumer
+	notifications chan *Notification
+
+	commitMu sync.Mutex
+}
+
+// NewConsumer initializes a new consumer
+func NewConsumer(addrs []string, groupID string,
topics []string, config *Config) (*Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	consumer, err := NewConsumerFromClient(client, groupID, topics)
+	if err != nil {
+		return nil, err
+	}
+	consumer.ownClient = true
+	return consumer, nil
+}
+
+// NewConsumerFromClient initializes a new consumer from an existing client.
+//
+// Please note that clients cannot be shared between consumers (due to Kafka internals),
+// they can only be re-used which requires the user to call Close() on the first consumer
+// before using this method again to initialize another one. Attempts to use a client with
+// more than one consumer at a time will return errors.
+func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) {
+	if !client.claim() {
+		return nil, errClientInUse
+	}
+
+	consumer, err := sarama.NewConsumerFromClient(client.Client)
+	if err != nil {
+		client.release()
+		return nil, err
+	}
+
+	sort.Strings(topics)
+	c := &Consumer{
+		client:   client,
+		consumer: consumer,
+		subs:     newPartitionMap(),
+		groupID:  groupID,
+
+		coreTopics: topics,
+
+		dying: make(chan none),
+		dead:  make(chan none),
+
+		messages:      make(chan *sarama.ConsumerMessage),
+		errors:        make(chan error, client.config.ChannelBufferSize),
+		partitions:    make(chan PartitionConsumer, 1),
+		notifications: make(chan *Notification),
+	}
+	if err := c.client.RefreshCoordinator(groupID); err != nil {
+		client.release()
+		return nil, err
+	}
+
+	go c.mainLoop()
+	return c, nil
+}
+
+// Messages returns the read channel for the messages that are returned by
+// the broker.
+//
+// This channel will only return messages if the Config.Group.Mode option is
+// set to ConsumerModeMultiplex (default).
+func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
+
+// Partitions returns the read channels for individual partitions of this broker.
+//
+// This channel will only return partitions if the Config.Group.Mode option is
+// set to ConsumerModePartitions.
+//
+// The Partitions() channel must be listened to for the life of this consumer;
+// when a rebalance happens old partitions will be closed (naturally come to
+// completion) and new ones will be emitted. The returned channel will only close
+// when the consumer is completely shut down.
+func (c *Consumer) Partitions() <-chan PartitionConsumer { return c.partitions }
+
+// Errors returns a read channel of errors that occur during offset management, if
+// enabled. By default, errors are logged and not returned over this channel. If
+// you want to implement any custom error handling, set your config's
+// Consumer.Return.Errors setting to true, and read from this channel.
+func (c *Consumer) Errors() <-chan error { return c.errors }
+
+// Notifications returns a channel of Notifications that occur during consumer
+// rebalancing. Notifications will only be emitted over this channel if your config's
+// Group.Return.Notifications setting is set to true.
+func (c *Consumer) Notifications() <-chan *Notification { return c.notifications }
+
+// HighWaterMarks returns the current high water marks for each topic and partition.
+// Consistency between partitions is not guaranteed since high water marks are updated separately.
+func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { return c.consumer.HighWaterMarks() }
+
+// MarkOffset marks the provided message as processed, alongside a metadata string
+// that represents the state of the partition consumer at that point in time.
The
+// metadata string can be used by another consumer to restore that state, so it
+// can resume consumption.
+//
+// Note: calling MarkOffset does not necessarily commit the offset to the backend
+// store immediately for efficiency reasons, and it may never be committed if
+// your application crashes. This means that you may end up processing the same
+// message twice, and your processing should ideally be idempotent.
+func (c *Consumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
+	sub := c.subs.Fetch(msg.Topic, msg.Partition)
+	if sub != nil {
+		sub.MarkOffset(msg.Offset+1, metadata)
+	}
+}
+
+// MarkPartitionOffset marks an offset of the provided topic/partition as processed.
+// See MarkOffset for additional explanation.
+func (c *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	sub := c.subs.Fetch(topic, partition)
+	if sub != nil {
+		sub.MarkOffset(offset+1, metadata)
+	}
+}
+
+// MarkOffsets marks stashed offsets as processed.
+// See MarkOffset for additional explanation.
+func (c *Consumer) MarkOffsets(s *OffsetStash) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for tp, info := range s.offsets {
+		sub := c.subs.Fetch(tp.Topic, tp.Partition)
+		if sub != nil {
+			sub.MarkOffset(info.Offset+1, info.Metadata)
+		}
+		delete(s.offsets, tp)
+	}
+}
+
+// ResetOffset marks the provided message as processed, alongside a metadata string
+// that represents the state of the partition consumer at that point in time. The
+// metadata string can be used by another consumer to restore that state, so it
+// can resume consumption.
+//
+// The difference between ResetOffset and MarkOffset is that ResetOffset allows
+// rewinding to an earlier offset
+func (c *Consumer) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
+	sub := c.subs.Fetch(msg.Topic, msg.Partition)
+	if sub != nil {
+		sub.ResetOffset(msg.Offset+1, metadata)
+	}
+}
+
+// ResetPartitionOffset marks an offset of the provided topic/partition as processed.
+// See ResetOffset for additional explanation.
+func (c *Consumer) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	sub := c.subs.Fetch(topic, partition)
+	if sub != nil {
+		sub.ResetOffset(offset+1, metadata)
+	}
+}
+
+// ResetOffsets marks stashed offsets as processed.
+// See ResetOffset for additional explanation.
+func (c *Consumer) ResetOffsets(s *OffsetStash) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for tp, info := range s.offsets {
+		sub := c.subs.Fetch(tp.Topic, tp.Partition)
+		if sub != nil {
+			sub.ResetOffset(info.Offset+1, info.Metadata)
+		}
+		delete(s.offsets, tp)
+	}
+}
+
+// Subscriptions returns the consumed topics and partitions
+func (c *Consumer) Subscriptions() map[string][]int32 {
+	return c.subs.Info()
+}
+
+// CommitOffsets manually commits previously marked offsets. By default there is no
+// need to call this function as the consumer will commit offsets automatically
+// using the Config.Consumer.Offsets.CommitInterval setting.
+//
+// Please be aware that calling this function during an internal rebalance cycle may return
+// broker errors (e.g. sarama.ErrUnknownMemberId or sarama.ErrIllegalGeneration).
+func (c *Consumer) CommitOffsets() error { + c.commitMu.Lock() + defer c.commitMu.Unlock() + + memberID, generationID := c.membership() + req := &sarama.OffsetCommitRequest{ + Version: 2, + ConsumerGroup: c.groupID, + ConsumerGroupGeneration: generationID, + ConsumerID: memberID, + RetentionTime: -1, + } + + if ns := c.client.config.Consumer.Offsets.Retention; ns != 0 { + req.RetentionTime = int64(ns / time.Millisecond) + } + + snap := c.subs.Snapshot() + dirty := false + for tp, state := range snap { + if state.Dirty { + dirty = true + req.AddBlock(tp.Topic, tp.Partition, state.Info.Offset, 0, state.Info.Metadata) + } + } + if !dirty { + return nil + } + + broker, err := c.client.Coordinator(c.groupID) + if err != nil { + c.closeCoordinator(broker, err) + return err + } + + resp, err := broker.CommitOffset(req) + if err != nil { + c.closeCoordinator(broker, err) + return err + } + + for topic, errs := range resp.Errors { + for partition, kerr := range errs { + if kerr != sarama.ErrNoError { + err = kerr + } else if state, ok := snap[topicPartition{topic, partition}]; ok { + sub := c.subs.Fetch(topic, partition) + if sub != nil { + sub.MarkCommitted(state.Info.Offset) + } + } + } + } + return err +} + +// Close safely closes the consumer and releases all resources +func (c *Consumer) Close() (err error) { + c.closeOnce.Do(func() { + close(c.dying) + <-c.dead + + if e := c.release(); e != nil { + err = e + } + if e := c.consumer.Close(); e != nil { + err = e + } + close(c.messages) + close(c.errors) + + if e := c.leaveGroup(); e != nil { + err = e + } + close(c.partitions) + close(c.notifications) + + // drain + for range c.messages { + } + for range c.errors { + } + for p := range c.partitions { + _ = p.Close() + } + for range c.notifications { + } + + c.client.release() + if c.ownClient { + if e := c.client.Close(); e != nil { + err = e + } + } + }) + return +} + +func (c *Consumer) mainLoop() { + defer close(c.dead) + defer atomic.StoreInt32(&c.consuming, 0) + + for { + atomic.StoreInt32(&c.consuming, 0) + + // Check if close was requested + select { + case <-c.dying: + return + default: + } + + // Start next consume cycle + c.nextTick() + } +} + +func (c *Consumer) nextTick() { + // Remember previous subscriptions + var notification *Notification + if c.client.config.Group.Return.Notifications { + notification = newNotification(c.subs.Info()) + } + + // Refresh coordinator + if err := c.refreshCoordinator(); err != nil { + c.rebalanceError(err, nil) + return + } + + // Release subscriptions + if err := c.release(); err != nil { + c.rebalanceError(err, nil) + return + } + + // Issue rebalance start notification + if c.client.config.Group.Return.Notifications { + c.handleNotification(notification) + } + + // Rebalance, fetch new subscriptions + subs, err := c.rebalance() + if err != nil { + c.rebalanceError(err, notification) + return + } + + // Coordinate loops, make sure everything is + // stopped on exit + tomb := newLoopTomb() + defer tomb.Close() + + // Start the heartbeat + tomb.Go(c.hbLoop) + + // Subscribe to topic/partitions + if err := c.subscribe(tomb, subs); err != nil { + c.rebalanceError(err, notification) + return + } + + // Update/issue notification with new claims + if c.client.config.Group.Return.Notifications { + notification = notification.success(subs) + c.handleNotification(notification) + } + + // Start topic watcher loop + tomb.Go(c.twLoop) + + // Start consuming and committing offsets + tomb.Go(c.cmLoop) + atomic.StoreInt32(&c.consuming, 1) + + // Wait for 
signals + select { + case <-tomb.Dying(): + case <-c.dying: + } +} + +// heartbeat loop, triggered by the mainLoop +func (c *Consumer) hbLoop(stopped <-chan none) { + ticker := time.NewTicker(c.client.config.Group.Heartbeat.Interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + switch err := c.heartbeat(); err { + case nil, sarama.ErrNoError: + case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress: + return + default: + c.handleError(&Error{Ctx: "heartbeat", error: err}) + return + } + case <-stopped: + return + case <-c.dying: + return + } + } +} + +// topic watcher loop, triggered by the mainLoop +func (c *Consumer) twLoop(stopped <-chan none) { + ticker := time.NewTicker(c.client.config.Metadata.RefreshFrequency / 2) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + topics, err := c.client.Topics() + if err != nil { + c.handleError(&Error{Ctx: "topics", error: err}) + return + } + + for _, topic := range topics { + if !c.isKnownCoreTopic(topic) && + !c.isKnownExtraTopic(topic) && + c.isPotentialExtraTopic(topic) { + return + } + } + case <-stopped: + return + case <-c.dying: + return + } + } +} + +// commit loop, triggered by the mainLoop +func (c *Consumer) cmLoop(stopped <-chan none) { + ticker := time.NewTicker(c.client.config.Consumer.Offsets.CommitInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); err != nil { + c.handleError(&Error{Ctx: "commit", error: err}) + return + } + case <-stopped: + return + case <-c.dying: + return + } + } +} + +func (c *Consumer) rebalanceError(err error, n *Notification) { + if n != nil { + n.Type = RebalanceError + c.handleNotification(n) + } + + switch err { + case sarama.ErrRebalanceInProgress: + default: + c.handleError(&Error{Ctx: "rebalance", error: err}) + } + + select { + case <-c.dying: + case <-time.After(c.client.config.Metadata.Retry.Backoff): + } +} + +func (c *Consumer) handleNotification(n *Notification) { + if c.client.config.Group.Return.Notifications { + select { + case c.notifications <- n: + case <-c.dying: + return + } + } +} + +func (c *Consumer) handleError(e *Error) { + if c.client.config.Consumer.Return.Errors { + select { + case c.errors <- e: + case <-c.dying: + return + } + } else { + sarama.Logger.Printf("%s error: %s\n", e.Ctx, e.Error()) + } +} + +// Releases the consumer and commits offsets, called from rebalance() and Close() +func (c *Consumer) release() (err error) { + // Stop all consumers + c.subs.Stop() + + // Clear subscriptions on exit + defer c.subs.Clear() + + // Wait for messages to be processed + timeout := time.NewTimer(c.client.config.Group.Offsets.Synchronization.DwellTime) + defer timeout.Stop() + + select { + case <-c.dying: + case <-timeout.C: + } + + // Commit offsets, continue on errors + if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil { + err = e + } + + return +} + +// -------------------------------------------------------------------- + +// Performs a heartbeat, part of the mainLoop() +func (c *Consumer) heartbeat() error { + broker, err := c.client.Coordinator(c.groupID) + if err != nil { + c.closeCoordinator(broker, err) + return err + } + + memberID, generationID := c.membership() + resp, err := broker.Heartbeat(&sarama.HeartbeatRequest{ + GroupId: c.groupID, + MemberId: memberID, + GenerationId: generationID, + }) + if err != nil { + c.closeCoordinator(broker, err) + return err + } + return resp.Err +} + +// 
Performs a rebalance, part of the mainLoop() +func (c *Consumer) rebalance() (map[string][]int32, error) { + memberID, _ := c.membership() + sarama.Logger.Printf("cluster/consumer %s rebalance\n", memberID) + + allTopics, err := c.client.Topics() + if err != nil { + return nil, err + } + c.extraTopics = c.selectExtraTopics(allTopics) + sort.Strings(c.extraTopics) + + // Re-join consumer group + strategy, err := c.joinGroup() + switch { + case err == sarama.ErrUnknownMemberId: + c.membershipMu.Lock() + c.memberID = "" + c.membershipMu.Unlock() + return nil, err + case err != nil: + return nil, err + } + + // Sync consumer group state, fetch subscriptions + subs, err := c.syncGroup(strategy) + switch { + case err == sarama.ErrRebalanceInProgress: + return nil, err + case err != nil: + _ = c.leaveGroup() + return nil, err + } + return subs, nil +} + +// Performs the subscription, part of the mainLoop() +func (c *Consumer) subscribe(tomb *loopTomb, subs map[string][]int32) error { + // fetch offsets + offsets, err := c.fetchOffsets(subs) + if err != nil { + _ = c.leaveGroup() + return err + } + + // create consumers in parallel + var mu sync.Mutex + var wg sync.WaitGroup + + for topic, partitions := range subs { + for _, partition := range partitions { + wg.Add(1) + + info := offsets[topic][partition] + go func(topic string, partition int32) { + if e := c.createConsumer(tomb, topic, partition, info); e != nil { + mu.Lock() + err = e + mu.Unlock() + } + wg.Done() + }(topic, partition) + } + } + wg.Wait() + + if err != nil { + _ = c.release() + _ = c.leaveGroup() + } + return err +} + +// -------------------------------------------------------------------- + +// Send a request to the broker to join group on rebalance() +func (c *Consumer) joinGroup() (*balancer, error) { + memberID, _ := c.membership() + req := &sarama.JoinGroupRequest{ + GroupId: c.groupID, + MemberId: memberID, + SessionTimeout: int32(c.client.config.Group.Session.Timeout / time.Millisecond), + ProtocolType: "consumer", + } + + meta := &sarama.ConsumerGroupMemberMetadata{ + Version: 1, + Topics: append(c.coreTopics, c.extraTopics...), + UserData: c.client.config.Group.Member.UserData, + } + err := req.AddGroupProtocolMetadata(string(StrategyRange), meta) + if err != nil { + return nil, err + } + err = req.AddGroupProtocolMetadata(string(StrategyRoundRobin), meta) + if err != nil { + return nil, err + } + + broker, err := c.client.Coordinator(c.groupID) + if err != nil { + c.closeCoordinator(broker, err) + return nil, err + } + + resp, err := broker.JoinGroup(req) + if err != nil { + c.closeCoordinator(broker, err) + return nil, err + } else if resp.Err != sarama.ErrNoError { + c.closeCoordinator(broker, resp.Err) + return nil, resp.Err + } + + var strategy *balancer + if resp.LeaderId == resp.MemberId { + members, err := resp.GetMembers() + if err != nil { + return nil, err + } + + strategy, err = newBalancerFromMeta(c.client, members) + if err != nil { + return nil, err + } + } + + c.membershipMu.Lock() + c.memberID = resp.MemberId + c.generationID = resp.GenerationId + c.membershipMu.Unlock() + + return strategy, nil +} + +// Send a request to the broker to sync the group on rebalance(). +// Returns a list of topics and partitions to consume. 
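The join/sync cycle implemented around here surfaces to applications as rebalance notifications. A hedged sketch of watching them (assumes Group.Return.Notifications is enabled; broker and topic names are placeholders, not part of the patch):

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	config.Group.Return.Notifications = true

	consumer, err := cluster.NewConsumer(
		[]string{"localhost:9092"}, "example-group", []string{"example-topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Drain messages in the background so the consumer keeps making progress.
	go func() {
		for range consumer.Messages() {
		}
	}()

	// Each successful rebalance emits a notification describing the change.
	for n := range consumer.Notifications() {
		log.Printf("rebalanced: claimed=%v released=%v current=%v",
			n.Claimed, n.Released, n.Current)
	}
}
```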
+func (c *Consumer) syncGroup(strategy *balancer) (map[string][]int32, error) {
+	memberID, generationID := c.membership()
+	req := &sarama.SyncGroupRequest{
+		GroupId:      c.groupID,
+		MemberId:     memberID,
+		GenerationId: generationID,
+	}
+
+	for memberID, topics := range strategy.Perform(c.client.config.Group.PartitionStrategy) {
+		if err := req.AddGroupAssignmentMember(memberID, &sarama.ConsumerGroupMemberAssignment{
+			Version: 1,
+			Topics:  topics,
+		}); err != nil {
+			return nil, err
+		}
+	}
+
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	}
+
+	resp, err := broker.SyncGroup(req)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	} else if resp.Err != sarama.ErrNoError {
+		c.closeCoordinator(broker, resp.Err)
+		return nil, resp.Err
+	}
+
+	// Return if there is nothing to subscribe to
+	if len(resp.MemberAssignment) == 0 {
+		return nil, nil
+	}
+
+	// Get assigned subscriptions
+	members, err := resp.GetMemberAssignment()
+	if err != nil {
+		return nil, err
+	}
+
+	// Sort partitions, for each topic
+	for topic := range members.Topics {
+		sort.Sort(int32Slice(members.Topics[topic]))
+	}
+	return members.Topics, nil
+}
+
+// Fetches latest committed offsets for all subscriptions
+func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) {
+	offsets := make(map[string]map[int32]offsetInfo, len(subs))
+	req := &sarama.OffsetFetchRequest{
+		Version:       1,
+		ConsumerGroup: c.groupID,
+	}
+
+	for topic, partitions := range subs {
+		offsets[topic] = make(map[int32]offsetInfo, len(partitions))
+		for _, partition := range partitions {
+			offsets[topic][partition] = offsetInfo{Offset: -1}
+			req.AddPartition(topic, partition)
+		}
+	}
+
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	}
+
+	resp, err := broker.FetchOffset(req)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	}
+
+	for topic, partitions := range subs {
+		for _, partition := range partitions {
+			block := resp.GetBlock(topic, partition)
+			if block == nil {
+				return nil, sarama.ErrIncompleteResponse
+			}
+
+			if block.Err == sarama.ErrNoError {
+				offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata}
+			} else {
+				return nil, block.Err
+			}
+		}
+	}
+	return offsets, nil
+}
+
+// Send a request to the broker to leave the group on a failed rebalance() and on Close()
+func (c *Consumer) leaveGroup() error {
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return err
+	}
+
+	memberID, _ := c.membership()
+	if _, err = broker.LeaveGroup(&sarama.LeaveGroupRequest{
+		GroupId:  c.groupID,
+		MemberId: memberID,
+	}); err != nil {
+		c.closeCoordinator(broker, err)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------
+
+func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error {
+	memberID, _ := c.membership()
+	sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial))
+
+	// Create partitionConsumer
+	pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial)
+	if err != nil {
+		return err
+	}
+
+	// Store in subscriptions
+	c.subs.Store(topic, partition, pc)
+
+	// Start partition consumer goroutine
+	tomb.Go(func(stopper <-chan none) {
+		if c.client.config.Group.Mode == ConsumerModePartitions {
+			pc.WaitFor(stopper, c.errors)
+		} else {
+			pc.Multiplex(stopper, c.messages, c.errors)
+		}
+	})
+
+	if c.client.config.Group.Mode == ConsumerModePartitions {
+		c.partitions <- pc
+	}
+	return nil
+}
+
+func (c *Consumer) commitOffsetsWithRetry(retries int) error {
+	err := c.CommitOffsets()
+	if err != nil && retries > 0 {
+		return c.commitOffsetsWithRetry(retries - 1)
+	}
+	return err
+}
+
+func (c *Consumer) closeCoordinator(broker *sarama.Broker, err error) {
+	if broker != nil {
+		_ = broker.Close()
+	}
+
+	switch err {
+	case sarama.ErrConsumerCoordinatorNotAvailable, sarama.ErrNotCoordinatorForConsumer:
+		_ = c.client.RefreshCoordinator(c.groupID)
+	}
+}
+
+func (c *Consumer) selectExtraTopics(allTopics []string) []string {
+	extra := allTopics[:0]
+	for _, topic := range allTopics {
+		if !c.isKnownCoreTopic(topic) && c.isPotentialExtraTopic(topic) {
+			extra = append(extra, topic)
+		}
+	}
+	return extra
+}
+
+func (c *Consumer) isKnownCoreTopic(topic string) bool {
+	pos := sort.SearchStrings(c.coreTopics, topic)
+	return pos < len(c.coreTopics) && c.coreTopics[pos] == topic
+}
+
+func (c *Consumer) isKnownExtraTopic(topic string) bool {
+	pos := sort.SearchStrings(c.extraTopics, topic)
+	return pos < len(c.extraTopics) && c.extraTopics[pos] == topic
+}
+
+func (c *Consumer) isPotentialExtraTopic(topic string) bool {
+	rx := c.client.config.Group.Topics
+	if rx.Blacklist != nil && rx.Blacklist.MatchString(topic) {
+		return false
+	}
+	if rx.Whitelist != nil && rx.Whitelist.MatchString(topic) {
+		return true
+	}
+	return false
+}
+
+func (c *Consumer) refreshCoordinator() error {
+	if err := c.refreshMetadata(); err != nil {
+		return err
+	}
+	return c.client.RefreshCoordinator(c.groupID)
+}
+
+func (c *Consumer) refreshMetadata() (err error) {
+	if c.client.config.Metadata.Full {
+		err = c.client.RefreshMetadata()
+	} else {
+		var topics []string
+		if topics, err = c.client.Topics(); err == nil && len(topics) != 0 {
+			err = c.client.RefreshMetadata(topics...)
+		}
+	}
+
+	// maybe we didn't have authorization to describe all topics
+	switch err {
+	case sarama.ErrTopicAuthorizationFailed:
+		err = c.client.RefreshMetadata(c.coreTopics...)
+	}
+	return
+}
+
+func (c *Consumer) membership() (memberID string, generationID int32) {
+	c.membershipMu.RLock()
+	memberID, generationID = c.memberID, c.generationID
+	c.membershipMu.RUnlock()
+	return
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/doc.go b/vendor/github.com/bsm/sarama-cluster/doc.go
new file mode 100644
index 0000000000..9c8ff16a77
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/doc.go
@@ -0,0 +1,8 @@
+/*
+Package cluster provides cluster extensions for Sarama, enabling users
+to consume topics across multiple, balanced nodes.
+
+It requires Kafka v0.9+ and follows the steps guide, described in:
+https://cwiki.apache.org/confluence/display/KAFKA/Kafka+0.9+Consumer+Rewrite+Design
+*/
+package cluster
diff --git a/vendor/github.com/bsm/sarama-cluster/offsets.go b/vendor/github.com/bsm/sarama-cluster/offsets.go
new file mode 100644
index 0000000000..4223ac5e01
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/offsets.go
@@ -0,0 +1,69 @@
+package cluster
+
+import (
+	"sync"
+
+	"github.com/Shopify/sarama"
+)
+
+// OffsetStash allows you to accumulate offsets and
+// mark them as processed in bulk
+type OffsetStash struct {
+	offsets map[topicPartition]offsetInfo
+	mu      sync.Mutex
+}
+
+// NewOffsetStash inits a blank stash
+func NewOffsetStash() *OffsetStash {
+	return &OffsetStash{offsets: make(map[topicPartition]offsetInfo)}
+}
+
+// MarkOffset stashes the provided message offset
+func (s *OffsetStash) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
+	s.MarkPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
+}
+
+// MarkPartitionOffset stashes the offset for the provided topic/partition combination
+func (s *OffsetStash) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	key := topicPartition{Topic: topic, Partition: partition}
+	if info := s.offsets[key]; offset >= info.Offset {
+		info.Offset = offset
+		info.Metadata = metadata
+		s.offsets[key] = info
+	}
+}
+
+// ResetPartitionOffset stashes the offset for the provided topic/partition combination.
+// The difference from MarkPartitionOffset is that ResetPartitionOffset supports earlier offsets
+func (s *OffsetStash) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	key := topicPartition{Topic: topic, Partition: partition}
+	if info := s.offsets[key]; offset <= info.Offset {
+		info.Offset = offset
+		info.Metadata = metadata
+		s.offsets[key] = info
+	}
+}
+
+// ResetOffset stashes the provided message offset
+// See ResetPartitionOffset for explanation
+func (s *OffsetStash) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
+	s.ResetPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
+}
+
+// Offsets returns the latest stashed offsets by topic-partition
+func (s *OffsetStash) Offsets() map[string]int64 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	res := make(map[string]int64, len(s.offsets))
+	for tp, info := range s.offsets {
+		res[tp.String()] = info.Offset
+	}
+	return res
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/partitions.go b/vendor/github.com/bsm/sarama-cluster/partitions.go
new file mode 100644
index 0000000000..987780bde1
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/partitions.go
@@ -0,0 +1,287 @@
+package cluster
+
+import (
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/Shopify/sarama"
+)
+
+// PartitionConsumer allows code to consume individual partitions from the cluster.
+//
+// See docs for Consumer.Partitions() for more on how to implement this.
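Before the interface definition, a hedged sketch (not part of the patch) of partition-mode consumption as described: with Group.Mode set to ConsumerModePartitions, each claimed partition arrives on Partitions() as its own PartitionConsumer. Broker and topic names are placeholders:

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	config.Group.Mode = cluster.ConsumerModePartitions

	consumer, err := cluster.NewConsumer(
		[]string{"localhost:9092"}, "example-group", []string{"example-topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Partitions() must be drained for the life of the consumer; on a
	// rebalance, old PartitionConsumers end and new ones are emitted.
	for pc := range consumer.Partitions() {
		go func(pc cluster.PartitionConsumer) {
			defer pc.Close()
			for msg := range pc.Messages() {
				log.Printf("%s/%d@%d", pc.Topic(), pc.Partition(), msg.Offset)
				consumer.MarkOffset(msg, "")
			}
		}(pc)
	}
}
```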
+type PartitionConsumer interface { + sarama.PartitionConsumer + + // Topic returns the consumed topic name + Topic() string + + // Partition returns the consumed partition + Partition() int32 +} + +type partitionConsumer struct { + sarama.PartitionConsumer + + state partitionState + mu sync.Mutex + + topic string + partition int32 + + closeOnce sync.Once + closeErr error + + dying, dead chan none +} + +func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) { + pcm, err := manager.ConsumePartition(topic, partition, info.NextOffset(defaultOffset)) + + // Resume from default offset, if requested offset is out-of-range + if err == sarama.ErrOffsetOutOfRange { + info.Offset = -1 + pcm, err = manager.ConsumePartition(topic, partition, defaultOffset) + } + if err != nil { + return nil, err + } + + return &partitionConsumer{ + PartitionConsumer: pcm, + state: partitionState{Info: info}, + + topic: topic, + partition: partition, + + dying: make(chan none), + dead: make(chan none), + }, nil +} + +// Topic implements PartitionConsumer +func (c *partitionConsumer) Topic() string { return c.topic } + +// Partition implements PartitionConsumer +func (c *partitionConsumer) Partition() int32 { return c.partition } + +// AsyncClose implements PartitionConsumer +func (c *partitionConsumer) AsyncClose() { + c.closeOnce.Do(func() { + c.closeErr = c.PartitionConsumer.Close() + close(c.dying) + }) +} + +// Close implements PartitionConsumer +func (c *partitionConsumer) Close() error { + c.AsyncClose() + <-c.dead + return c.closeErr +} + +func (c *partitionConsumer) WaitFor(stopper <-chan none, errors chan<- error) { + defer close(c.dead) + + for { + select { + case err, ok := <-c.Errors(): + if !ok { + return + } + select { + case errors <- err: + case <-stopper: + return + case <-c.dying: + return + } + case <-stopper: + return + case <-c.dying: + return + } + } +} + +func (c *partitionConsumer) Multiplex(stopper <-chan none, messages chan<- *sarama.ConsumerMessage, errors chan<- error) { + defer close(c.dead) + + for { + select { + case msg, ok := <-c.Messages(): + if !ok { + return + } + select { + case messages <- msg: + case <-stopper: + return + case <-c.dying: + return + } + case err, ok := <-c.Errors(): + if !ok { + return + } + select { + case errors <- err: + case <-stopper: + return + case <-c.dying: + return + } + case <-stopper: + return + case <-c.dying: + return + } + } +} + +func (c *partitionConsumer) State() partitionState { + if c == nil { + return partitionState{} + } + + c.mu.Lock() + state := c.state + c.mu.Unlock() + + return state +} + +func (c *partitionConsumer) MarkCommitted(offset int64) { + if c == nil { + return + } + + c.mu.Lock() + if offset == c.state.Info.Offset { + c.state.Dirty = false + } + c.mu.Unlock() +} + +func (c *partitionConsumer) MarkOffset(offset int64, metadata string) { + if c == nil { + return + } + + c.mu.Lock() + if offset > c.state.Info.Offset { + c.state.Info.Offset = offset + c.state.Info.Metadata = metadata + c.state.Dirty = true + } + c.mu.Unlock() +} + +func (c *partitionConsumer) ResetOffset(offset int64, metadata string) { + if c == nil { + return + } + + c.mu.Lock() + if offset <= c.state.Info.Offset { + c.state.Info.Offset = offset + c.state.Info.Metadata = metadata + c.state.Dirty = true + } + c.mu.Unlock() +} + +// -------------------------------------------------------------------- + +type partitionState struct { + Info offsetInfo + Dirty bool + LastCommit 
time.Time +} + +// -------------------------------------------------------------------- + +type partitionMap struct { + data map[topicPartition]*partitionConsumer + mu sync.RWMutex +} + +func newPartitionMap() *partitionMap { + return &partitionMap{ + data: make(map[topicPartition]*partitionConsumer), + } +} + +func (m *partitionMap) IsSubscribedTo(topic string) bool { + m.mu.RLock() + defer m.mu.RUnlock() + + for tp := range m.data { + if tp.Topic == topic { + return true + } + } + return false +} + +func (m *partitionMap) Fetch(topic string, partition int32) *partitionConsumer { + m.mu.RLock() + pc, _ := m.data[topicPartition{topic, partition}] + m.mu.RUnlock() + return pc +} + +func (m *partitionMap) Store(topic string, partition int32, pc *partitionConsumer) { + m.mu.Lock() + m.data[topicPartition{topic, partition}] = pc + m.mu.Unlock() +} + +func (m *partitionMap) Snapshot() map[topicPartition]partitionState { + m.mu.RLock() + defer m.mu.RUnlock() + + snap := make(map[topicPartition]partitionState, len(m.data)) + for tp, pc := range m.data { + snap[tp] = pc.State() + } + return snap +} + +func (m *partitionMap) Stop() { + m.mu.RLock() + defer m.mu.RUnlock() + + var wg sync.WaitGroup + for tp := range m.data { + wg.Add(1) + go func(p *partitionConsumer) { + _ = p.Close() + wg.Done() + }(m.data[tp]) + } + wg.Wait() +} + +func (m *partitionMap) Clear() { + m.mu.Lock() + for tp := range m.data { + delete(m.data, tp) + } + m.mu.Unlock() +} + +func (m *partitionMap) Info() map[string][]int32 { + info := make(map[string][]int32) + m.mu.RLock() + for tp := range m.data { + info[tp.Topic] = append(info[tp.Topic], tp.Partition) + } + m.mu.RUnlock() + + for topic := range info { + sort.Sort(int32Slice(info[topic])) + } + return info +} diff --git a/vendor/github.com/bsm/sarama-cluster/util.go b/vendor/github.com/bsm/sarama-cluster/util.go new file mode 100644 index 0000000000..e7cb5dd1b8 --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/util.go @@ -0,0 +1,75 @@ +package cluster + +import ( + "fmt" + "sort" + "sync" +) + +type none struct{} + +type topicPartition struct { + Topic string + Partition int32 +} + +func (tp *topicPartition) String() string { + return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition) +} + +type offsetInfo struct { + Offset int64 + Metadata string +} + +func (i offsetInfo) NextOffset(fallback int64) int64 { + if i.Offset > -1 { + return i.Offset + } + return fallback +} + +type int32Slice []int32 + +func (p int32Slice) Len() int { return len(p) } +func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p int32Slice) Diff(o int32Slice) (res []int32) { + on := len(o) + for _, x := range p { + n := sort.Search(on, func(i int) bool { return o[i] >= x }) + if n < on && o[n] == x { + continue + } + res = append(res, x) + } + return +} + +// -------------------------------------------------------------------- + +type loopTomb struct { + c chan none + o sync.Once + w sync.WaitGroup +} + +func newLoopTomb() *loopTomb { + return &loopTomb{c: make(chan none)} +} + +func (t *loopTomb) stop() { t.o.Do(func() { close(t.c) }) } +func (t *loopTomb) Close() { t.stop(); t.w.Wait() } + +func (t *loopTomb) Dying() <-chan none { return t.c } +func (t *loopTomb) Go(f func(<-chan none)) { + t.w.Add(1) + + go func() { + defer t.stop() + defer t.w.Done() + + f(t.c) + }() +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 
0000000000..c836416192
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000000..8a4a6589a2
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+	// internal reflect.Value fields. These values are valid before golang
+	// commit ecccf07e7f9d which changed the format. They are also valid
+	// after commit 82f48826c6c7 which changed the format again to mirror
+	// the original format. Code in the init function updates these offsets
+	// as necessary.
+	offsetPtr    = uintptr(ptrSize)
+	offsetScalar = uintptr(0)
+	offsetFlag   = uintptr(ptrSize * 2)
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions. Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = uintptr(flagKindWidth - 1)
+	flagRO        = uintptr(1 << 0)
+	flagIndir     = uintptr(1 << 1)
+)
+
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions). Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind. This code extracts the kind from the flags
+	// field and ensures it's the correct type. When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded. This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set. When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+	indirects := 1
+	vt := v.Type()
+	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+	if rvf&flagIndir != 0 {
+		vt = reflect.PtrTo(v.Type())
+		indirects++
+	} else if offsetScalar != 0 {
+		// The value is in the scalar field when it's not one of the
+		// reference types.
+		switch vt.Kind() {
+		case reflect.Uintptr:
+		case reflect.Chan:
+		case reflect.Func:
+		case reflect.Map:
+		case reflect.Ptr:
+		case reflect.UnsafePointer:
+		default:
+			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+				offsetScalar)
+		}
+	}
+
+	pv := reflect.NewAt(vt, upv)
+	rv = pv
+	for i := 0; i < indirects; i++ {
+		rv = rv.Elem()
+	}
+	return rv
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000000..1fe3cf3d5d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000000..7c519ff47a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
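The error/Stringer dispatch above is easiest to see with a custom Stringer, since it is this output that also feeds the surrogate sort keys below. A small sketch (not part of the patch; temperature is a made-up example type):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type temperature float64

// String gives the type a custom representation; handleMethods picks it up.
func (t temperature) String() string { return fmt.Sprintf("%.1f C", float64(t)) }

func main() {
	// Default config: the Stringer method is invoked.
	spew.Dump(temperature(21.5))

	// DisableMethods skips the Stringer and shows the raw float64 instead.
	cfg := spew.ConfigState{Indent: " ", DisableMethods: true}
	cfg.Dump(temperature(21.5))
}
```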
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
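A short sketch (not part of the patch) of what this sorting buys in practice, via the public SortKeys option; the map contents are arbitrary:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"b": 2, "c": 3, "a": 1}

	// Default config: natural (randomized) map iteration order.
	spew.Dump(m)

	// SortKeys routes map keys through sortValues for stable, diffable output.
	cfg := spew.ConfigState{Indent: " ", SortKeys: true}
	cfg.Dump(m) // keys printed in order: a, b, c
}
```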
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000000..2e3d22f312
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use set this to a
+	// single space by default. If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures. The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods. As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked. The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output. Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings. This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+//	Indent: " "
+//	MaxDepth: 0
+//	DisableMethods: false
+//	DisablePointerMethods: false
+//	ContinueOnMethod: false
+//	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000000..aacaac6f1e
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default. A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output. Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability. Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings. This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.
The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000000..df1d582a72
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8. It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char. It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000000..c49875bacb --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000000..32c0e33882
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE new file mode 100644 index 0000000000..698a3f5139 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md new file mode 100644 index 0000000000..2d1b3d9322 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md @@ -0,0 +1,34 @@ +circuit-breaker +=============== + +[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) +[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +The circuit-breaker resiliency pattern for golang. + +Creating a breaker takes three parameters: +- error threshold (for opening the breaker) +- success threshold (for closing the breaker) +- timeout (how long to keep the breaker open) + +```go +b := breaker.New(3, 1, 5*time.Second) + +for { + result := b.Run(func() error { + // communicate with some external service and + // return an error if the communication failed + return nil + }) + + switch result { + case nil: + // success! + case breaker.ErrBreakerOpen: + // our function wasn't run because the breaker was open + default: + // some other error + } +} +``` diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go new file mode 100644 index 0000000000..f88ca7248b --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go @@ -0,0 +1,161 @@ +// Package breaker implements the circuit-breaker resiliency pattern for Go. +package breaker + +import ( + "errors" + "sync" + "sync/atomic" + "time" +) + +// ErrBreakerOpen is the error returned from Run() when the function is not executed +// because the breaker is currently open. 
+var ErrBreakerOpen = errors.New("circuit breaker is open")
+
+const (
+	closed uint32 = iota
+	open
+	halfOpen
+)
+
+// Breaker implements the circuit-breaker resiliency pattern
+type Breaker struct {
+	errorThreshold, successThreshold int
+	timeout                          time.Duration
+
+	lock              sync.Mutex
+	state             uint32
+	errors, successes int
+	lastError         time.Time
+}
+
+// New constructs a new circuit-breaker that starts closed.
+// From closed, the breaker opens if "errorThreshold" errors are seen
+// without an error-free period of at least "timeout". From open, the
+// breaker half-closes after "timeout". From half-open, the breaker closes
+// after "successThreshold" consecutive successes, or opens on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+	return &Breaker{
+		errorThreshold:   errorThreshold,
+		successThreshold: successThreshold,
+		timeout:          timeout,
+	}
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
+func (b *Breaker) Go(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	// errcheck complains about ignoring the error return value, but
+	// that's on purpose; if you want an error from a goroutine you have to
+	// get it over a channel or something
+	go b.doWork(state, work)
+
+	return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+	var panicValue interface{}
+
+	result := func() error {
+		defer func() {
+			panicValue = recover()
+		}()
+		return work()
+	}()
+
+	if result == nil && panicValue == nil && state == closed {
+		// short-circuit the normal, success path without contending
+		// on the lock
+		return nil
+	}
+
+	// oh well, I guess we have to contend on the lock
+	b.processResult(result, panicValue)
+
+	if panicValue != nil {
+		// as close as Go lets us come to a "rethrow" although unfortunately
+		// we lose the original panicking location
+		panic(panicValue)
+	}
+
+	return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if result == nil && panicValue == nil {
+		if b.state == halfOpen {
+			b.successes++
+			if b.successes == b.successThreshold {
+				b.closeBreaker()
+			}
+		}
+	} else {
+		if b.errors > 0 {
+			expiry := b.lastError.Add(b.timeout)
+			if time.Now().After(expiry) {
+				b.errors = 0
+			}
+		}
+
+		switch b.state {
+		case closed:
+			b.errors++
+			if b.errors == b.errorThreshold {
+				b.openBreaker()
+			} else {
+				b.lastError = time.Now()
+			}
+		case halfOpen:
+			b.openBreaker()
+		}
+	}
+}
+
+func (b *Breaker) openBreaker() {
+	b.changeState(open)
+	go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+	b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+	time.Sleep(b.timeout)
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState
uint32) { + b.errors = 0 + b.successes = 0 + atomic.StoreUint32(&b.state, newState) +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml new file mode 100644 index 0000000000..d6cf4f1fa1 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: +- 1.5.4 +- 1.6.1 + +sudo: false diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE new file mode 100644 index 0000000000..5bf3688d9e --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md new file mode 100644 index 0000000000..3f2695c728 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/README.md @@ -0,0 +1,13 @@ +# go-xerial-snappy + +[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) + +Xerial-compatible Snappy framing support for golang. + +Packages using Xerial for snappy encoding use a framing format incompatible with +basically everything else in existence. This package wraps Go's built-in snappy +package to support it. + +Apps that use this format include Apache Kafka (see +https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for +details). 
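The README above describes the framing quirk in prose; as a quick illustration, here is a minimal round-trip sketch (not part of the vendored code) that uses only the Encode and Decode functions defined in snappy.go below. The payload value is an arbitrary placeholder:

```go
package main

import (
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	payload := []byte("hello kafka") // arbitrary placeholder data

	// Encode produces raw, unframed snappy.
	encoded := snappy.Encode(payload)

	// Decode transparently handles both raw snappy and the
	// xerial-framed variant produced by Kafka clients.
	decoded, err := snappy.Decode(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // hello kafka
}
```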
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
new file mode 100644
index 0000000000..6a46f4784e
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
@@ -0,0 +1,16 @@
+// +build gofuzz
+
+package snappy
+
+func Fuzz(data []byte) int {
+	decode, err := Decode(data)
+	if decode == nil && err == nil {
+		panic("nil error with nil result")
+	}
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 0000000000..de69eacbb3
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,71 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+
+	master "github.com/golang/snappy"
+)
+
+const (
+	sizeOffset = 16
+	sizeBytes  = 4
+)
+
+var (
+	xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+	// ErrMalformed is returned by the decoder when the xerial framing
+	// is malformed
+	ErrMalformed = errors.New("malformed xerial framing")
+)
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+	return master.Encode(nil, src)
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+	var max = len(src)
+	if max < len(xerialHeader) {
+		return nil, ErrMalformed
+	}
+
+	if !bytes.Equal(src[:8], xerialHeader) {
+		return master.Decode(nil, src)
+	}
+
+	if max < sizeOffset+sizeBytes {
+		return nil, ErrMalformed
+	}
+
+	var (
+		pos   = sizeOffset
+		dst   = make([]byte, 0, len(src))
+		chunk []byte
+		err   error
+	)
+
+	for pos+sizeBytes <= max {
+		size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes]))
+		pos += sizeBytes
+
+		nextPos := pos + size
+		// On architectures where int is 32 bits wide, size + pos could
+		// overflow, so we need to check the low bound as well as the
+		// high.
+		if nextPos < pos || nextPos > max {
+			return nil, ErrMalformed
+		}
+
+		chunk, err = master.Decode(chunk, src[pos:nextPos])
+		if err != nil {
+			return nil, err
+		}
+		pos = nextPos
+		dst = append(dst, chunk...)
+ } + return dst, nil +} diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore new file mode 100644 index 0000000000..836562412f --- /dev/null +++ b/vendor/github.com/eapache/queue/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml new file mode 100644 index 0000000000..235a40a493 --- /dev/null +++ b/vendor/github.com/eapache/queue/.travis.yml @@ -0,0 +1,7 @@ +language: go +sudo: false + +go: + - 1.2 + - 1.3 + - 1.4 diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE new file mode 100644 index 0000000000..d5f36dbcaa --- /dev/null +++ b/vendor/github.com/eapache/queue/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md new file mode 100644 index 0000000000..8e782335cd --- /dev/null +++ b/vendor/github.com/eapache/queue/README.md @@ -0,0 +1,16 @@ +Queue +===== + +[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) +[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is in part because it is *not* thread-safe. + +Follows semantic versioning using https://gopkg.in/ - import from +[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) +for guaranteed API stability. 
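Since the queue README stops short of a usage example, here is a short sketch (not part of the vendored code) exercising the API defined in queue.go below:

```go
package main

import (
	"fmt"

	"github.com/eapache/queue"
)

func main() {
	q := queue.New()

	// Add appends at the tail; the ring buffer doubles in size as needed.
	for i := 0; i < 5; i++ {
		q.Add(i)
	}

	fmt.Println(q.Length()) // 5
	fmt.Println(q.Peek())   // 0 (head of the queue)
	fmt.Println(q.Get(-1))  // 4 (negative indexes count from the tail)

	// Remove pops from the head. The queue is deliberately not
	// thread-safe, so wrap it in a mutex if shared across goroutines.
	fmt.Println(q.Remove()) // 0
}
```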
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
new file mode 100644
index 0000000000..71d1acdf27
--- /dev/null
+++ b/vendor/github.com/eapache/queue/queue.go
@@ -0,0 +1,102 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+// minQueueLen is the smallest capacity that the queue may have.
+// It must be a power of 2 for the bitwise modulus: x % n == x & (n - 1).
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+	buf               []interface{}
+	head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+	return &Queue{
+		buf: make([]interface{}, minQueueLen),
+	}
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+	return q.count
+}
+
+// resize resizes the queue to fit exactly twice its current contents.
+// This can result in shrinking if the queue is less than half-full.
+func (q *Queue) resize() {
+	newBuf := make([]interface{}, q.count<<1)
+
+	if q.tail > q.head {
+		copy(newBuf, q.buf[q.head:q.tail])
+	} else {
+		n := copy(newBuf, q.buf[q.head:])
+		copy(newBuf[n:], q.buf[:q.tail])
+	}
+
+	q.head = 0
+	q.tail = q.count
+	q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+	if q.count == len(q.buf) {
+		q.resize()
+	}
+
+	q.buf[q.tail] = elem
+	// bitwise modulus
+	q.tail = (q.tail + 1) & (len(q.buf) - 1)
+	q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+	if q.count <= 0 {
+		panic("queue: Peek() called on empty queue")
+	}
+	return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic. This method accepts both positive and
+// negative index values. Index 0 refers to the first element, and
+// index -1 refers to the last.
+func (q *Queue) Get(i int) interface{} {
+	// If indexing backwards, convert to positive index.
+	if i < 0 {
+		i += q.count
+	}
+	if i < 0 || i >= q.count {
+		panic("queue: Get() called with index out of range")
+	}
+	// bitwise modulus
+	return q.buf[(q.head+i)&(len(q.buf)-1)]
+}
+
+// Remove removes and returns the element from the front of the queue. If the
+// queue is empty, the call will panic.
+func (q *Queue) Remove() interface{} {
+	if q.count <= 0 {
+		panic("queue: Remove() called on empty queue")
+	}
+	ret := q.buf[q.head]
+	q.buf[q.head] = nil
+	// bitwise modulus
+	q.head = (q.head + 1) & (len(q.buf) - 1)
+	q.count--
+	// Resize down if the buffer is 1/4 full.
+ if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { + q.resize() + } + return ret +} diff --git a/vendor/github.com/go-redis/redis/.gitignore b/vendor/github.com/go-redis/redis/.gitignore new file mode 100644 index 0000000000..ebfe903bcd --- /dev/null +++ b/vendor/github.com/go-redis/redis/.gitignore @@ -0,0 +1,2 @@ +*.rdb +testdata/*/ diff --git a/vendor/github.com/go-redis/redis/.travis.yml b/vendor/github.com/go-redis/redis/.travis.yml new file mode 100644 index 0000000000..39ffc2becb --- /dev/null +++ b/vendor/github.com/go-redis/redis/.travis.yml @@ -0,0 +1,20 @@ +sudo: false +language: go + +services: + - redis-server + +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get github.com/onsi/ginkgo + - go get github.com/onsi/gomega diff --git a/vendor/github.com/go-redis/redis/CHANGELOG.md b/vendor/github.com/go-redis/redis/CHANGELOG.md new file mode 100644 index 0000000000..cb0e1b8e97 --- /dev/null +++ b/vendor/github.com/go-redis/redis/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +## v6.13 + +- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better key distribution between shards. +- Cluster client was optimized to use much less memory when reloading cluster state. +- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a timeout occurs. In most cases it is recommended to use PubSub.Channel instead. +- Dialer.KeepAlive is set to 5 minutes by default. + +## v6.12 + +- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal Redis servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/LICENSE b/vendor/github.com/go-redis/redis/LICENSE new file mode 100644 index 0000000000..298bed9bea --- /dev/null +++ b/vendor/github.com/go-redis/redis/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/go-redis/redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
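The `ClusterSlots` option mentioned in the CHANGELOG above is defined in cluster.go later in this patch. A minimal sketch (not part of the patch) of the manual-setup pattern the changelog links to, assuming placeholder addresses and an even split of the 16384 hash slots between two standalone servers:

```go
package main

import "github.com/go-redis/redis"

func main() {
	// ClusterSlots lets ClusterClient route commands by hash slot across
	// plain Redis servers that have no cluster mode enabled. The two
	// addresses below are placeholders.
	client := redis.NewClusterClient(&redis.ClusterOptions{
		ClusterSlots: func() ([]redis.ClusterSlot, error) {
			return []redis.ClusterSlot{
				{Start: 0, End: 8191, Nodes: []redis.ClusterNode{{Addr: "localhost:7000"}}},
				{Start: 8192, End: 16383, Nodes: []redis.ClusterNode{{Addr: "localhost:7001"}}},
			}, nil
		},
	})
	defer client.Close()

	// Each key hashes to a slot; the slot picks the serving node.
	if err := client.Set("key", "value", 0).Err(); err != nil {
		panic(err)
	}
}
```

The slots function could equally consult an external store (the doc comment on the option suggests something like ZooKeeper) and be re-run via ClusterClient.ReloadState when topology changes.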
diff --git a/vendor/github.com/go-redis/redis/Makefile b/vendor/github.com/go-redis/redis/Makefile new file mode 100644 index 0000000000..1fbdac91c5 --- /dev/null +++ b/vendor/github.com/go-redis/redis/Makefile @@ -0,0 +1,20 @@ +all: testdeps + go test ./... + go test ./... -short -race + env GOOS=linux GOARCH=386 go test ./... + go vet + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $ +} + +func ExampleClient() { + err := client.Set("key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := client.Get("key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := client.Get("key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. + +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := client.SetNX("key", "value", 10*time.Second).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() +``` + +## Benchmark + +go-redis vs redigo: + +``` +BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op +BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op +``` + +Redis Cluster: + +``` +BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op +BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op +``` + +## See also + +- [Golang PostgreSQL 
ORM](https://github.com/go-pg/pg) +- [Golang msgpack](https://github.com/vmihailenco/msgpack) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go new file mode 100644 index 0000000000..7a1af143e0 --- /dev/null +++ b/vendor/github.com/go-redis/redis/cluster.go @@ -0,0 +1,1624 @@ +package redis + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "math" + "math/rand" + "net" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/hashtag" + "github.com/go-redis/redis/internal/pool" + "github.com/go-redis/redis/internal/proto" + "github.com/go-redis/redis/internal/singleflight" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 8 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. + ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func() ([]ClusterSlot, error) + + // Following options are copied from Options struct. + + OnConnect func(*Conn) error + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + Password string + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolSize applies per cluster node and not for the whole cluster. 
+ PoolSize int + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 8 + } + + if opt.RouteByLatency || opt.RouteRandomly { + opt.ReadOnly = true + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + OnConnect: opt.OnConnect, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + Password: opt.Password, + readOnly: opt.ReadOnly, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + loading uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const probes = 10 + + var latency uint32 + for i := 0; i < probes; i++ { + start := time.Now() + n.Client.Ping() + probe := uint32(time.Since(start) / time.Microsecond) + latency = (latency + probe) / 2 + } + atomic.StoreUint32(&n.latency, latency) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsLoading() { + atomic.StoreUint32(&n.loading, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Loading() bool { + const minute = int64(time.Minute / time.Second) + + loading := atomic.LoadUint32(&n.loading) + if loading == 0 { + return false + } + if time.Now().Unix()-int64(loading) < minute { + return true + } + atomic.StoreUint32(&n.loading, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + allAddrs []string + allNodes map[string]*clusterNode + clusterAddrs []string + closed bool + + nodeCreateGroup singleflight.Group + + _generation uint32 // atomic +} + +func newClusterNodes(opt 
*ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + allAddrs: opt.Addrs, + allNodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.allNodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.allNodes = nil + c.clusterAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + c.mu.RLock() + closed := c.closed + if !closed { + if len(c.clusterAddrs) > 0 { + addrs = c.clusterAddrs + } else { + addrs = c.allAddrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. +func (c *clusterNodes) GC(generation uint32) { + var collected []*clusterNode + c.mu.Lock() + for addr, node := range c.allNodes { + if node.Generation() >= generation { + continue + } + + c.clusterAddrs = remove(c.clusterAddrs, addr) + delete(c.allNodes, addr) + collected = append(collected, node) + } + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) Get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.allNodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.Get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + v, err := c.nodeCreateGroup.Do(addr, func() (interface{}, error) { + node := newClusterNode(c.opt, addr) + return node, nil + }) + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.allNodes[addr] + if ok { + _ = v.(*clusterNode).Close() + return node, err + } + node = v.(*clusterNode) + + c.allAddrs = appendIfNotExists(c.allAddrs, addr) + if err == nil { + c.clusterAddrs = append(c.clusterAddrs, addr) + } + c.allNodes[addr] = node + + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.allNodes)) + for _, node := range c.allNodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: 
make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + isLoopbackOrigin := isLoopbackAddr(origin) + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin && useOriginAddr(origin, addr) { + addr = origin + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Loading() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Loading() { + break + } + } + return slave, nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + const threshold = time.Millisecond + + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Loading() { + continue + } + if node == nil || node.Latency()-n.Latency() > threshold { + node = n + } + } + return node, nil +} + +func (c *clusterState) slotRandomNode(slot int) *clusterNode { + nodes := c.slotNodes(slot) + n := rand.Intn(len(nodes)) + return nodes[n] +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +func (c *clusterState) IsConsistent() bool { + if c.nodes.opt.ClusterSlots != nil { + return true + } + return len(c.Masters) <= len(c.Slaves) +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func() (*clusterState, error) + + state atomic.Value + + firstErrMu sync.RWMutex + firstErr error + + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload() (*clusterState, error) { + state, err := c.reload() + if err != nil { + return nil, err + } + if !state.IsConsistent() { + time.AfterFunc(time.Second, c.LazyReload) + } + return state, nil +} + +func (c *clusterStateHolder) reload() (*clusterState, error) { + state, err := c.load() + if err != nil { + c.firstErrMu.Lock() + if c.firstErr == nil { + c.firstErr = err + } + c.firstErrMu.Unlock() + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer 
atomic.StoreUint32(&c.reloading, 0) + + for { + state, err := c.reload() + if err != nil { + return + } + time.Sleep(100 * time.Millisecond) + if state.IsConsistent() { + return + } + } + }() +} + +func (c *clusterStateHolder) Get() (*clusterState, error) { + v := c.state.Load() + if v != nil { + state := v.(*clusterState) + if time.Since(state.createdAt) > time.Minute { + c.LazyReload() + } + return state, nil + } + + c.firstErrMu.RLock() + err := c.firstErr + c.firstErrMu.RUnlock() + if err != nil { + return nil, err + } + + return nil, errors.New("redis: cluster has no state") +} + +func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) { + state, err := c.Reload() + if err == nil { + return state, nil + } + return c.Get() +} + +//------------------------------------------------------------------------------ + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + cmdable + + ctx context.Context + + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache + + process func(Cmder) error + processPipeline func([]Cmder) error + processTxPipeline func([]Cmder) error +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + + c.process = c.defaultProcess + c.processPipeline = c.defaultProcessPipeline + c.processTxPipeline = c.defaultProcessTxPipeline + + c.init() + + _, _ = c.state.Reload() + _, _ = c.cmdsInfoCache.Get() + + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +// ReloadState reloads cluster state. It calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState() error { + _, err := c.state.Reload() + return err +} + +func (c *ClusterClient) init() { + c.cmdable.setProcessor(c.Process) +} + +func (c *ClusterClient) Context() context.Context { + if c.ctx != nil { + return c.ctx + } + return context.Background() +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + c2 := c.copy() + c2.ctx = ctx + return c2 +} + +func (c *ClusterClient) copy() *ClusterClient { + cp := *c + cp.init() + return &cp +} + +// Options returns read-only Options that were used to create the client. 
+func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) { + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.Get(addr) + if err != nil { + return nil, err + } + if node == nil { + continue + } + + info, err := node.Client.Command().Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get() + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logf("info for cmd=%s not found", name) + } + return info +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return 0, nil, err + } + + cmdInfo := c.cmdInfo(cmd.Name()) + slot := cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) + + if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly { + if c.opt.RouteByLatency { + node, err := state.slotClosestNode(slot) + return slot, node, err + } + + if c.opt.RouteRandomly { + node := state.slotRandomNode(slot) + return slot, node, nil + } + + node, err := state.slotSlaveNode(slot) + return slot, node, err + } + + node, err := state.slotMasterNode(slot) + return slot, node, err +} + +func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return nil, err + } + + nodes := state.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + err = node.Client.Watch(fn, keys...) + if err == nil { + break + } + + if internal.IsRetryableError(err, true) { + c.state.LazyReload() + continue + } + + moved, ask, addr := internal.IsMovedError(err) + if moved || ask { + c.state.LazyReload() + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if err == pool.ErrClosed { + node, err = c.slotMasterNode(slot) + if err != nil { + return err + } + continue + } + + return err + } + + return err +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. 
+func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +func (c *ClusterClient) WrapProcess( + fn func(oldProcess func(Cmder) error) func(Cmder) error, +) { + c.process = fn(c.process) +} + +func (c *ClusterClient) Process(cmd Cmder) error { + return c.process(cmd) +} + +func (c *ClusterClient) defaultProcess(cmd Cmder) error { + var node *clusterNode + var ask bool + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + if node == nil { + var err error + _, node, err = c.cmdSlotAndNode(cmd) + if err != nil { + cmd.setErr(err) + break + } + } + + var err error + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(NewCmd("ASKING")) + _ = pipe.Process(cmd) + _, err = pipe.Exec() + _ = pipe.Close() + ask = false + } else { + err = node.Client.Process(cmd) + } + + // If there is no error - we are done. + if err == nil { + break + } + + // If slave is loading - read from master. + if c.opt.ReadOnly && internal.IsLoadingError(err) { + node.MarkAsLoading() + continue + } + + if internal.IsRetryableError(err, true) { + c.state.LazyReload() + + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try random node. + node, err = c.nodes.Random() + if err != nil { + break + } + continue + } + + var moved bool + var addr string + moved, ask, addr = internal.IsMovedError(err) + if moved || ask { + c.state.LazyReload() + + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + break + } + continue + } + + if err == pool.ErrClosed { + node = nil + continue + } + + break + } + + return cmd.Err() +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachNode concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. 
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. +func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get() + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.FreeConns += s.FreeConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.FreeConns += s.FreeConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState() (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots() + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots().Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logf("ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(fn) +} + +func (c *ClusterClient) WrapProcessPipeline( + fn func(oldProcess func([]Cmder) error) func([]Cmder) error, +) { + c.processPipeline = fn(c.processPipeline) +} + +func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error { + cmdsMap, err := c.mapCmdsByNode(cmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := make(map[*clusterNode][]Cmder) + + for node, cmds := range cmdsMap { + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.remapCmds(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + continue + } + + err = c.pipelineProcessCmds(node, cn, cmds, failedCmds) + if err == nil || internal.IsRedisError(err) { + node.Client.connPool.Put(cn) + } else { + node.Client.connPool.Remove(cn) + } + } + + if len(failedCmds) == 0 { + break + } + cmdsMap = failedCmds + } + + return firstCmdsErr(cmds) +} + +func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) { + state, err := c.state.Get() + if err != nil { + setCmdsErr(cmds, err) + return nil, err + } + + cmdsMap := make(map[*clusterNode][]Cmder) + cmdsAreReadOnly := c.cmdsAreReadOnly(cmds) + for _, cmd := range cmds { + var node *clusterNode + var err error + if cmdsAreReadOnly { + _, node, err = c.cmdSlotAndNode(cmd) + } else { + slot := c.cmdSlot(cmd) + node, err = state.slotMasterNode(slot) + } + if err != nil { + return nil, err + } + cmdsMap[node] = append(cmdsMap[node], cmd) + } + return cmdsMap, nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) remapCmds(cmds []Cmder, failedCmds map[*clusterNode][]Cmder) { + remappedCmds, err := c.mapCmdsByNode(cmds) + if err != nil { + setCmdsErr(cmds, err) + return + } + + for node, cmds := range remappedCmds { + failedCmds[node] = cmds + } +} + +func (c *ClusterClient) pipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + cn.SetWriteTimeout(c.opt.WriteTimeout) + + err := writeCmd(cn, cmds...) + if err != nil { + setCmdsErr(cmds, err) + failedCmds[node] = cmds + return err + } + + // Set read timeout for all commands. 
+ cn.SetReadTimeout(c.opt.ReadTimeout) + + return c.pipelineReadCmds(cn, cmds, failedCmds) +} + +func (c *ClusterClient) pipelineReadCmds( + cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + for _, cmd := range cmds { + err := cmd.readReply(cn) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) { + continue + } + + if internal.IsRedisError(err) { + continue + } + + return err + } + return nil +} + +func (c *ClusterClient) checkMovedErr( + cmd Cmder, err error, failedCmds map[*clusterNode][]Cmder, +) bool { + moved, ask, addr := internal.IsMovedError(err) + + if moved { + c.state.LazyReload() + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds[node] = append(failedCmds[node], cmd) + return true + } + + if ask { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd) + return true + } + + return false +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processTxPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(fn) +} + +func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error { + state, err := c.state.Get() + if err != nil { + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := make(map[*clusterNode][]Cmder) + + for node, cmds := range cmdsMap { + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.remapCmds(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + continue + } + + err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds) + if err == nil || internal.IsRedisError(err) { + node.Client.connPool.Put(cn) + } else { + node.Client.connPool.Remove(cn) + } + } + + if len(failedCmds) == 0 { + break + } + cmdsMap = failedCmds + } + } + + return firstCmdsErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) txPipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + cn.SetWriteTimeout(c.opt.WriteTimeout) + if err := txPipelineWriteMulti(cn, cmds); err != nil { + setCmdsErr(cmds, err) + failedCmds[node] = cmds + return err + } + + // Set read timeout for all commands. + cn.SetReadTimeout(c.opt.ReadTimeout) + + if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil { + setCmdsErr(cmds, err) + return err + } + + return pipelineReadCmds(cn, cmds) +} + +func (c *ClusterClient) txPipelineReadQueued( + cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + // Parse queued replies. 
+ var statusCmd StatusCmd + if err := statusCmd.readReply(cn); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(cn) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) { + continue + } + + return err + } + + // Parse number of replies. + line, err := cn.Rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + err := proto.ParseErrorReply(line) + for _, cmd := range cmds { + if !c.checkMovedErr(cmd, err, failedCmds) { + break + } + } + return err + case proto.ArrayReply: + // ok + default: + err := fmt.Errorf("redis: expected '*', but got line %q", line) + return err + } + + return nil +} + +func (c *ClusterClient) pubSub(channels []string) *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(channels []string) (*pool.Conn, error) { + if node == nil { + var slot int + if len(channels) > 0 { + slot = hashtag.Slot(channels[0]) + } else { + slot = -1 + } + + masterNode, err := c.slotMasterNode(slot) + if err != nil { + return nil, err + } + node = masterNode + } + return node.Client.newConn() + }, + closeConn: func(cn *pool.Conn) error { + return node.Client.connPool.CloseConn(cn) + }, + } + pubsub.init() + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(channels ...string) *PubSub { + pubsub := c.pubSub(channels) + if len(channels) > 0 { + _ = pubsub.Subscribe(channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(channels ...string) *PubSub { + pubsub := c.pubSub(channels) + if len(channels) > 0 { + _ = pubsub.PSubscribe(channels...) + } + return pubsub +} + +func useOriginAddr(originAddr, nodeAddr string) bool { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return false + } + + nodeIP := net.ParseIP(nodeHost) + if nodeIP == nil { + return false + } + + if !nodeIP.IsLoopback() { + return false + } + + _, originPort, err := net.SplitHostPort(originAddr) + if err != nil { + return false + } + + return nodePort == originPort +} + +func isLoopbackAddr(addr string) bool { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return false + } + + ip := net.ParseIP(host) + if ip == nil { + return false + } + + return ip.IsLoopback() +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +func remove(ss []string, es ...string) []string { + if len(es) == 0 { + return ss[:0] + } + for _, e := range es { + for i, s := range ss { + if s == e { + ss = append(ss[:i], ss[i+1:]...) 
+ break + } + } + } + return ss +} diff --git a/vendor/github.com/go-redis/redis/cluster_commands.go b/vendor/github.com/go-redis/redis/cluster_commands.go new file mode 100644 index 0000000000..dff62c902d --- /dev/null +++ b/vendor/github.com/go-redis/redis/cluster_commands.go @@ -0,0 +1,22 @@ +package redis + +import "sync/atomic" + +func (c *ClusterClient) DBSize() *IntCmd { + cmd := NewIntCmd("dbsize") + var size int64 + err := c.ForEachMaster(func(master *Client) error { + n, err := master.DBSize().Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.setErr(err) + return cmd + } + cmd.val = size + return cmd +} diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go new file mode 100644 index 0000000000..11472bec1b --- /dev/null +++ b/vendor/github.com/go-redis/redis/command.go @@ -0,0 +1,1225 @@ +package redis + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/pool" + "github.com/go-redis/redis/internal/proto" + "github.com/go-redis/redis/internal/util" +) + +type Cmder interface { + Name() string + Args() []interface{} + stringArg(int) string + + readReply(*pool.Conn) error + setErr(error) + + readTimeout() *time.Duration + + Err() error + fmt.Stringer +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.setErr(e) + } + } +} + +func firstCmdsErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmd(cn *pool.Conn, cmds ...Cmder) error { + cn.Wb.Reset() + for _, cmd := range cmds { + if err := cn.Wb.Append(cmd.Args()); err != nil { + return err + } + } + + _, err := cn.Write(cn.Wb.Bytes()) + return err +} + +func cmdString(cmd Cmder, val interface{}) string { + var ss []string + for _, arg := range cmd.Args() { + ss = append(ss, fmt.Sprint(arg)) + } + s := strings.Join(ss, " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + switch vv := val.(type) { + case []byte: + return s + ": " + string(vv) + default: + return s + ": " + fmt.Sprint(val) + } + } + return s + +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + } + if info == nil { + return 0 + } + return int(info.FirstKeyPos) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []interface{} + err error + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd._args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd._args) { + return "" + } + s, _ := cmd._args[pos].(string) + return s +} + +func (cmd *baseCmd) Name() string { + if len(cmd._args) > 0 { + // Cmd name must be lower cased. 
+ s := internal.ToLower(cmd.stringArg(0)) + cmd._args[0] = s + return s + } + return "" +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + if b, ok := cmd.val.([]byte); ok { + // Bytes must be copied, because underlying memory is reused. + cmd.val = string(b) + } + return nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadStringReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadIntReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{_args: args}, + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, 
cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(cn *pool.Conn) error { + var n int64 + n, cmd.err = cn.Rd.ReadIntReply() + if cmd.err != nil { + return cmd.err + } + cmd.val = time.Duration(n) * cmd.precision + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(timeParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(time.Time) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +var ok = []byte("OK") + +func (cmd *BoolCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + // TODO: is this okay? 
+ if cmd.err == Nil { + cmd.val = false + cmd.err = nil + return nil + } + if cmd.err != nil { + return cmd.err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case []byte: + cmd.val = bytes.Equal(v, ok) + return nil + default: + cmd.err = fmt.Errorf("got %T, wanted int64 or string", v) + return cmd.err + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val []byte +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringCmd) Val() string { + return util.BytesToString(cmd.val) +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return cmd.val, cmd.err +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan(cmd.val, val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadBytesReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadFloatReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]string) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + 
+func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(boolSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]bool) + return nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringStringMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]string) + return nil +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringIntMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]int64) + return nil +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringStructMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]struct{}) + return nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []*XMessage +} + +type XMessage struct { + ID string + Values map[string]interface{} +} + +//------------------------------------------------------------------------------ + +type XStreamSliceCmd struct { + baseCmd + + val []*XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XStreamSliceCmd) Val() []*XStream { + return cmd.val +} + +func (cmd 
*XStreamSliceCmd) Result() ([]*XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(xStreamSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]*XStream) + return nil +} + +// Implements proto.MultiBulkParse +func xStreamSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + xx := make([]*XStream, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadArrayReply(xStreamParser) + if err != nil { + return nil, err + } + xx[i] = v.(*XStream) + } + return xx, nil +} + +// Implements proto.MultiBulkParse +func xStreamParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xMessageSliceParser) + if err != nil { + return nil, err + } + + return &XStream{ + Stream: stream, + Messages: v.([]*XMessage), + }, nil +} + +//------------------------------------------------------------------------------ + +type XMessageSliceCmd struct { + baseCmd + + val []*XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XMessageSliceCmd) Val() []*XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]*XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(xMessageSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]*XMessage) + return nil +} + +// Implements proto.MultiBulkParse +func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + msgs := make([]*XMessage, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadArrayReply(xMessageParser) + if err != nil { + return nil, err + } + msgs[i] = v.(*XMessage) + } + return msgs, nil +} + +// Implements proto.MultiBulkParse +func xMessageParser(rd *proto.Reader, n int64) (interface{}, error) { + id, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xKeyValueParser) + if err != nil { + return nil, err + } + + return &XMessage{ + ID: id, + Values: v.(map[string]interface{}), + }, nil +} + +// Implements proto.MultiBulkParse +func xKeyValueParser(rd *proto.Reader, n int64) (interface{}, error) { + values := make(map[string]interface{}, n) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + value, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + values[key] = value + } + return values, nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(cn *pool.Conn) 
+type ZSliceCmd struct {
+	baseCmd
+
+	val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
+	return &ZSliceCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+	return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(zSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]Z)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+	baseCmd
+
+	page   []string
+	cursor uint64
+
+	process func(cmd Cmder) error
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
+	return &ScanCmd{
+		baseCmd: baseCmd{_args: args},
+		process: process,
+	}
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+	return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+	return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+	return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(cn *pool.Conn) error {
+	cmd.page, cmd.cursor, cmd.err = cn.Rd.ReadScanReply()
+	return cmd.err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+	return &ScanIterator{
+		cmd: cmd,
+	}
+}
+
+//------------------------------------------------------------------------------
+
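ScanCmd carries both the returned page and the next cursor, and Iterator hides the cursor loop. A usage sketch (editorial example, assuming keys exist under the prefix session:*):

	package main

	import (
		"fmt"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		// Iterator re-issues SCAN with the returned cursor until it reaches 0.
		iter := client.Scan(0, "session:*", 100).Iterator()
		for iter.Next() {
			fmt.Println(iter.Val())
		}
		if err := iter.Err(); err != nil {
			panic(err)
		}
	}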
+type ClusterNode struct {
+	Id   string
+	Addr string
+}
+
+type ClusterSlot struct {
+	Start int
+	End   int
+	Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+	baseCmd
+
+	val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
+	return &ClusterSlotsCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+	return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(clusterSlotsParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]ClusterSlot)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+	Name                      string
+	Longitude, Latitude, Dist float64
+	GeoHash                   int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+	Radius float64
+	// Can be m, km, ft, or mi. Default is km.
+	Unit        string
+	WithCoord   bool
+	WithDist    bool
+	WithGeoHash bool
+	Count       int
+	// Can be ASC or DESC. Default is no sort order.
+	Sort      string
+	Store     string
+	StoreDist string
+}
+
+type GeoLocationCmd struct {
+	baseCmd
+
+	q         *GeoRadiusQuery
+	locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+	args = append(args, q.Radius)
+	if q.Unit != "" {
+		args = append(args, q.Unit)
+	} else {
+		args = append(args, "km")
+	}
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithGeoHash {
+		args = append(args, "withhash")
+	}
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+	}
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+	if q.Store != "" {
+		args = append(args, "store")
+		args = append(args, q.Store)
+	}
+	if q.StoreDist != "" {
+		args = append(args, "storedist")
+		args = append(args, q.StoreDist)
+	}
+	return &GeoLocationCmd{
+		baseCmd: baseCmd{_args: args},
+		q:       q,
+	}
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+	return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+	return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.locations = v.([]GeoLocation)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+	Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+	baseCmd
+
+	positions []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
+	return &GeoPosCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+	return cmd.positions
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+	return cmdString(cmd, cmd.positions)
+}
+
+func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(geoPosSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.positions = v.([]*GeoPos)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+	Name        string
+	Arity       int8
+	Flags       []string
+	FirstKeyPos int8
+	LastKeyPos  int8
+	StepCount   int8
+	ReadOnly    bool
+}
+
+type CommandsInfoCmd struct {
+	baseCmd
+
+	val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
+	return &CommandsInfoCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+	return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(commandInfoSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.(map[string]*CommandInfo)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+	fn func() (map[string]*CommandInfo, error)
+
+	once internal.Once
+	cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache {
+	return &cmdsInfoCache{
+		fn: fn,
+	}
+}
+
+func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) {
+	err := c.once.Do(func() error {
+		cmds, err := c.fn()
+		if err != nil {
+			return err
+		}
+		c.cmds = cmds
+		return nil
+	})
+	return c.cmds, err
+}
diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go
new file mode 100644
index 0000000000..dddf8acd65
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/commands.go
@@ -0,0 +1,2337 @@
+package redis
+
+import (
+	"errors"
+	"io"
+	"time"
+
+	"github.com/go-redis/redis/internal"
+)
+
+func readTimeout(timeout time.Duration) time.Duration {
+	if timeout == 0 {
+		return 0
+	}
+	return timeout + 10*time.Second
+}
+
+func usePrecise(dur time.Duration) bool {
+	return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(dur time.Duration) int64 {
+	if dur > 0 && dur < time.Millisecond {
+		internal.Logf(
+			"specified duration is %s, but minimal supported value is %s",
+			dur, time.Millisecond,
+		)
+	}
+	return int64(dur / time.Millisecond)
+}
+
+func formatSec(dur time.Duration) int64 {
+	if dur > 0 && dur < time.Second {
+		internal.Logf(
+			"specified duration is %s, but minimal supported value is %s",
+			dur, time.Second,
+		)
+	}
+	return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+	if len(src) == 1 {
+		if ss, ok := src[0].([]string); ok {
+			for _, s := range ss {
+				dst = append(dst, s)
+			}
+			return dst
+		}
+	}
+
+	for _, v := range src {
+		dst = append(dst, v)
+	}
+	return dst
+}
+
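Integer division means formatMs and formatSec truncate toward zero, so a sub-minimum duration silently becomes 0 (no expiration); the log warning above is the only signal. usePrecise is what later decides between EX and PX. A worked illustration (standalone editorial snippet that mirrors the unexported helper, since it cannot be called from outside the package):

	package main

	import (
		"fmt"
		"time"
	)

	// mirrors usePrecise above: sub-second or non-integral durations need PX.
	func usePrecise(dur time.Duration) bool {
		return dur < time.Second || dur%time.Second != 0
	}

	func main() {
		fmt.Println(usePrecise(10 * time.Second))        // false -> EX 10
		fmt.Println(usePrecise(1500 * time.Millisecond)) // true  -> PX 1500
		fmt.Println(usePrecise(90 * time.Second))        // false -> EX 90
	}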
+type Cmdable interface {
+	Pipeline() Pipeliner
+	Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
+
+	TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
+	TxPipeline() Pipeliner
+
+	Command() *CommandsInfoCmd
+	ClientGetName() *StringCmd
+	Echo(message interface{}) *StringCmd
+	Ping() *StatusCmd
+	Quit() *StatusCmd
+	Del(keys ...string) *IntCmd
+	Unlink(keys ...string) *IntCmd
+	Dump(key string) *StringCmd
+	Exists(keys ...string) *IntCmd
+	Expire(key string, expiration time.Duration) *BoolCmd
+	ExpireAt(key string, tm time.Time) *BoolCmd
+	Keys(pattern string) *StringSliceCmd
+	Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd
+	Move(key string, db int64) *BoolCmd
+	ObjectRefCount(key string) *IntCmd
+	ObjectEncoding(key string) *StringCmd
+	ObjectIdleTime(key string) *DurationCmd
+	Persist(key string) *BoolCmd
+	PExpire(key string, expiration time.Duration) *BoolCmd
+	PExpireAt(key string, tm time.Time) *BoolCmd
+	PTTL(key string) *DurationCmd
+	RandomKey() *StringCmd
+	Rename(key, newkey string) *StatusCmd
+	RenameNX(key, newkey string) *BoolCmd
+	Restore(key string, ttl time.Duration, value string) *StatusCmd
+	RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
+	Sort(key string, sort *Sort) *StringSliceCmd
+	SortStore(key, store string, sort *Sort) *IntCmd
+	SortInterfaces(key string, sort *Sort) *SliceCmd
+	Touch(keys ...string) *IntCmd
+	TTL(key string) *DurationCmd
+	Type(key string) *StatusCmd
+	Scan(cursor uint64, match string, count int64) *ScanCmd
+	SScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	HScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	Append(key, value string) *IntCmd
+	BitCount(key string, bitCount *BitCount) *IntCmd
+	BitOpAnd(destKey string, keys ...string) *IntCmd
+	BitOpOr(destKey string, keys ...string) *IntCmd
+	BitOpXor(destKey string, keys ...string) *IntCmd
+	BitOpNot(destKey string, key string) *IntCmd
+	BitPos(key string, bit int64, pos ...int64) *IntCmd
+	Decr(key string) *IntCmd
+	DecrBy(key string, decrement int64) *IntCmd
+	Get(key string) *StringCmd
+	GetBit(key string, offset int64) *IntCmd
+	GetRange(key string, start, end int64) *StringCmd
+	GetSet(key string, value interface{}) *StringCmd
+	Incr(key string) *IntCmd
+	IncrBy(key string, value int64) *IntCmd
+	IncrByFloat(key string, value float64) *FloatCmd
+	MGet(keys ...string) *SliceCmd
+	MSet(pairs ...interface{}) *StatusCmd
+	MSetNX(pairs ...interface{}) *BoolCmd
+	Set(key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetBit(key string, offset int64, value int) *IntCmd
+	SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetRange(key string, offset int64, value string) *IntCmd
+	StrLen(key string) *IntCmd
+	HDel(key string, fields ...string) *IntCmd
+	HExists(key, field string) *BoolCmd
+	HGet(key, field string) *StringCmd
+	HGetAll(key string) *StringStringMapCmd
+	HIncrBy(key, field string, incr int64) *IntCmd
+	HIncrByFloat(key, field string, incr float64) *FloatCmd
+	HKeys(key string) *StringSliceCmd
+	HLen(key string) *IntCmd
+	HMGet(key string, fields ...string) *SliceCmd
+	HMSet(key string, fields map[string]interface{}) *StatusCmd
+	HSet(key, field string, value interface{}) *BoolCmd
+	HSetNX(key, field string, value interface{}) *BoolCmd
+	HVals(key string) *StringSliceCmd
+	BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
+	LIndex(key string, index int64) *StringCmd
+	LInsert(key, op string, pivot, value interface{}) *IntCmd
+	LInsertBefore(key string, pivot, value interface{}) *IntCmd
+	LInsertAfter(key string, pivot, value interface{}) *IntCmd
+	LLen(key string) *IntCmd
+	LPop(key string) *StringCmd
+	LPush(key string, values ...interface{}) *IntCmd
+	LPushX(key string, value interface{}) *IntCmd
+	LRange(key string, start, stop int64) *StringSliceCmd
+	LRem(key string, count int64, value interface{}) *IntCmd
+	LSet(key string, index int64, value interface{}) *StatusCmd
+	LTrim(key string, start, stop int64) *StatusCmd
+	RPop(key string) *StringCmd
+	RPopLPush(source, destination string) *StringCmd
+	RPush(key string, values ...interface{}) *IntCmd
+	RPushX(key string, value interface{}) *IntCmd
+	SAdd(key string, members ...interface{}) *IntCmd
+	SCard(key string) *IntCmd
+	SDiff(keys ...string) *StringSliceCmd
+	SDiffStore(destination string, keys ...string) *IntCmd
+	SInter(keys ...string) *StringSliceCmd
+	SInterStore(destination string, keys ...string) *IntCmd
+	SIsMember(key string, member interface{}) *BoolCmd
+	SMembers(key string) *StringSliceCmd
+	SMembersMap(key string) *StringStructMapCmd
+	SMove(source, destination string, member interface{}) *BoolCmd
+	SPop(key string) *StringCmd
+	SPopN(key string, count int64) *StringSliceCmd
+	SRandMember(key string) *StringCmd
+	SRandMemberN(key string, count int64) *StringSliceCmd
+	SRem(key string, members ...interface{}) *IntCmd
+	SUnion(keys ...string) *StringSliceCmd
+	SUnionStore(destination string, keys ...string) *IntCmd
+	XAdd(stream, id string, els map[string]interface{}) *StringCmd
+	XAddExt(opt *XAddExt) *StringCmd
+	XLen(key string) *IntCmd
+	XRange(stream, start, stop string) *XMessageSliceCmd
+	XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd
+	XRevRange(stream string, start, stop string) *XMessageSliceCmd
+	XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd
+	XRead(streams ...string) *XStreamSliceCmd
+	XReadN(count int64, streams ...string) *XStreamSliceCmd
+	XReadExt(opt *XReadExt) *XStreamSliceCmd
+	ZAdd(key string, members ...Z) *IntCmd
+	ZAddNX(key string, members ...Z) *IntCmd
+	ZAddXX(key string, members ...Z) *IntCmd
+	ZAddCh(key string, members ...Z) *IntCmd
+	ZAddNXCh(key string, members ...Z) *IntCmd
+	ZAddXXCh(key string, members ...Z) *IntCmd
+	ZIncr(key string, member Z) *FloatCmd
+	ZIncrNX(key string, member Z) *FloatCmd
+	ZIncrXX(key string, member Z) *FloatCmd
+	ZCard(key string) *IntCmd
+	ZCount(key, min, max string) *IntCmd
+	ZLexCount(key, min, max string) *IntCmd
+	ZIncrBy(key string, increment float64, member string) *FloatCmd
+	ZInterStore(destination string, store ZStore, keys ...string) *IntCmd
+	ZRange(key string, start, stop int64) *StringSliceCmd
+	ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
+	ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+	ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+	ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+	ZRank(key, member string) *IntCmd
+	ZRem(key string, members ...interface{}) *IntCmd
+	ZRemRangeByRank(key string, start, stop int64) *IntCmd
+	ZRemRangeByScore(key, min, max string) *IntCmd
+	ZRemRangeByLex(key, min, max string) *IntCmd
+	ZRevRange(key string, start, stop int64) *StringSliceCmd
+	ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
+	ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+	ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+	ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+	ZRevRank(key, member string) *IntCmd
+	ZScore(key, member string) *FloatCmd
+	ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd
+	PFAdd(key string, els ...interface{}) *IntCmd
+	PFCount(keys ...string) *IntCmd
+	PFMerge(dest string, keys ...string) *StatusCmd
+	BgRewriteAOF() *StatusCmd
+	BgSave() *StatusCmd
+	ClientKill(ipPort string) *StatusCmd
+	ClientKillByFilter(keys ...string) *IntCmd
+	ClientList() *StringCmd
+	ClientPause(dur time.Duration) *BoolCmd
+	ConfigGet(parameter string) *SliceCmd
+	ConfigResetStat() *StatusCmd
+	ConfigSet(parameter, value string) *StatusCmd
+	ConfigRewrite() *StatusCmd
+	DBSize() *IntCmd
+	FlushAll() *StatusCmd
+	FlushAllAsync() *StatusCmd
+	FlushDB() *StatusCmd
+	FlushDBAsync() *StatusCmd
+	Info(section ...string) *StringCmd
+	LastSave() *IntCmd
+	Save() *StatusCmd
+	Shutdown() *StatusCmd
+	ShutdownSave() *StatusCmd
+	ShutdownNoSave() *StatusCmd
+	SlaveOf(host, port string) *StatusCmd
+	Time() *TimeCmd
+	Eval(script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(hashes ...string) *BoolSliceCmd
+	ScriptFlush() *StatusCmd
+	ScriptKill() *StatusCmd
+	ScriptLoad(script string) *StringCmd
+	DebugObject(key string) *StringCmd
+	Publish(channel string, message interface{}) *IntCmd
+	PubSubChannels(pattern string) *StringSliceCmd
+	PubSubNumSub(channels ...string) *StringIntMapCmd
+	PubSubNumPat() *IntCmd
+	ClusterSlots() *ClusterSlotsCmd
+	ClusterNodes() *StringCmd
+	ClusterMeet(host, port string) *StatusCmd
+	ClusterForget(nodeID string) *StatusCmd
+	ClusterReplicate(nodeID string) *StatusCmd
+	ClusterResetSoft() *StatusCmd
+	ClusterResetHard() *StatusCmd
+	ClusterInfo() *StringCmd
+	ClusterKeySlot(key string) *IntCmd
+	ClusterCountFailureReports(nodeID string) *IntCmd
+	ClusterCountKeysInSlot(slot int) *IntCmd
+	ClusterDelSlots(slots ...int) *StatusCmd
+	ClusterDelSlotsRange(min, max int) *StatusCmd
+	ClusterSaveConfig() *StatusCmd
+	ClusterSlaves(nodeID string) *StringSliceCmd
+	ClusterFailover() *StatusCmd
+	ClusterAddSlots(slots ...int) *StatusCmd
+	ClusterAddSlotsRange(min, max int) *StatusCmd
+	GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
+	GeoPos(key string, members ...string) *GeoPosCmd
+	GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoDist(key string, member1, member2, unit string) *FloatCmd
+	GeoHash(key string, members ...string) *StringSliceCmd
+	ReadOnly() *StatusCmd
+	ReadWrite() *StatusCmd
+	MemoryUsage(key string, samples ...int) *IntCmd
+}
+
+type StatefulCmdable interface {
+	Cmdable
+	Auth(password string) *StatusCmd
+	Select(index int) *StatusCmd
+	SwapDB(index1, index2 int) *StatusCmd
+	ClientSetName(name string) *BoolCmd
+}
+
+var _ Cmdable = (*Client)(nil)
+var _ Cmdable = (*Tx)(nil)
+var _ Cmdable = (*Ring)(nil)
+var _ Cmdable = (*ClusterClient)(nil)
+
+type cmdable struct {
+	process func(cmd Cmder) error
+}
+
+func (c *cmdable) setProcessor(fn func(Cmder) error) {
+	c.process = fn
+}
+
+type statefulCmdable struct {
+	cmdable
+	process func(cmd Cmder) error
+}
+
+func (c *statefulCmdable) setProcessor(fn func(Cmder) error) {
+	c.process = fn
+	c.cmdable.setProcessor(fn)
+}
+
+//------------------------------------------------------------------------------
+
+func (c *statefulCmdable) Auth(password string) *StatusCmd {
+	cmd := NewStatusCmd("auth", password)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Echo(message interface{}) *StringCmd {
+	cmd := NewStringCmd("echo", message)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Ping() *StatusCmd {
+	cmd := NewStatusCmd("ping")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
+	cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Quit() *StatusCmd {
+	panic("not implemented")
+}
+
+func (c *statefulCmdable) Select(index int) *StatusCmd {
+	cmd := NewStatusCmd("select", index)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *statefulCmdable) SwapDB(index1, index2 int) *StatusCmd {
+	cmd := NewStatusCmd("swapdb", index1, index2)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
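The process field is the only coupling between the command constructors and the transport: Client, Tx, Ring, and ClusterClient each install their own executor via setProcessor, and every method below just builds a Cmder and hands it off. A stripped-down illustration of the pattern (hypothetical types for exposition, not the vendored code):

	package main

	import "fmt"

	type cmd struct{ args []interface{} }

	type cmdable struct {
		process func(*cmd) error
	}

	func (c *cmdable) setProcessor(fn func(*cmd) error) { c.process = fn }

	// Ping builds the command and delegates execution to process.
	func (c *cmdable) Ping() *cmd {
		p := &cmd{args: []interface{}{"ping"}}
		c.process(p)
		return p
	}

	func main() {
		var c cmdable
		c.setProcessor(func(p *cmd) error {
			fmt.Println("would send:", p.args) // a pipeline would queue instead
			return nil
		})
		c.Ping()
	}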
+func (c *cmdable) Command() *CommandsInfoCmd {
+	cmd := NewCommandsInfoCmd("command")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Del(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "del"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Unlink(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unlink"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Dump(key string) *StringCmd {
+	cmd := NewStringCmd("dump", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Exists(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "exists"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("expire", key, formatSec(expiration))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd("expireat", key, tm.Unix())
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Keys(pattern string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("keys", pattern)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(
+		"migrate",
+		host,
+		port,
+		key,
+		db,
+		formatMs(timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Move(key string, db int64) *BoolCmd {
+	cmd := NewBoolCmd("move", key, db)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ObjectRefCount(key string) *IntCmd {
+	cmd := NewIntCmd("object", "refcount", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ObjectEncoding(key string) *StringCmd {
+	cmd := NewStringCmd("object", "encoding", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ObjectIdleTime(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Second, "object", "idletime", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Persist(key string) *BoolCmd {
+	cmd := NewBoolCmd("persist", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd(
+		"pexpireat",
+		key,
+		tm.UnixNano()/int64(time.Millisecond),
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PTTL(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Millisecond, "pttl", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RandomKey() *StringCmd {
+	cmd := NewStringCmd("randomkey")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Rename(key, newkey string) *StatusCmd {
+	cmd := NewStatusCmd("rename", key, newkey)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RenameNX(key, newkey string) *BoolCmd {
+	cmd := NewBoolCmd("renamenx", key, newkey)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		"restore",
+		key,
+		formatMs(ttl),
+		value,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		"restore",
+		key,
+		formatMs(ttl),
+		value,
+		"replace",
+	)
+	c.process(cmd)
+	return cmd
+}
+
+type Sort struct {
+	By            string
+	Offset, Count int64
+	Get           []string
+	Order         string
+	Alpha         bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+	args := []interface{}{"sort", key}
+	if sort.By != "" {
+		args = append(args, "by", sort.By)
+	}
+	if sort.Offset != 0 || sort.Count != 0 {
+		args = append(args, "limit", sort.Offset, sort.Count)
+	}
+	for _, get := range sort.Get {
+		args = append(args, "get", get)
+	}
+	if sort.Order != "" {
+		args = append(args, sort.Order)
+	}
+	if sort.Alpha {
+		args = append(args, "alpha")
+	}
+	return args
+}
+
+func (c *cmdable) Sort(key string, sort *Sort) *StringSliceCmd {
+	cmd := NewStringSliceCmd(sort.args(key)...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SortStore(key, store string, sort *Sort) *IntCmd {
+	args := sort.args(key)
+	if store != "" {
+		args = append(args, "store", store)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd {
+	cmd := NewSliceCmd(sort.args(key)...)
+	c.process(cmd)
+	return cmd
+}
+
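Sort.args linearizes the optional clauses in SORT's fixed argument order (BY, LIMIT, GET, order, ALPHA). A usage sketch (editorial example, assuming a local server):

	package main

	import (
		"fmt"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		client.RPush("ids", "3", "1", "2")
		// Emits: SORT ids LIMIT 0 2 ALPHA
		vals, err := client.Sort("ids", &redis.Sort{Count: 2, Alpha: true}).Result()
		if err != nil {
			panic(err)
		}
		fmt.Println(vals) // ["1" "2"]
	}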
+func (c *cmdable) Touch(keys ...string) *IntCmd {
+	args := make([]interface{}, len(keys)+1)
+	args[0] = "touch"
+	for i, key := range keys {
+		args[i+1] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) TTL(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Second, "ttl", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Type(key string) *StatusCmd {
+	cmd := NewStatusCmd("type", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"sscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"hscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"zscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Append(key, value string) *IntCmd {
+	cmd := NewIntCmd("append", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+type BitCount struct {
+	Start, End int64
+}
+
+func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
+	args := []interface{}{"bitcount", key}
+	if bitCount != nil {
+		args = append(
+			args,
+			bitCount.Start,
+			bitCount.End,
+		)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "bitop"
+	args[1] = op
+	args[2] = destKey
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("and", destKey, keys...)
+}
+
+func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("or", destKey, keys...)
+}
+
+func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("xor", destKey, keys...)
+}
+
+func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd {
+	return c.bitOp("not", destKey, key)
+}
+
+func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
+	args := make([]interface{}, 3+len(pos))
+	args[0] = "bitpos"
+	args[1] = key
+	args[2] = bit
+	switch len(pos) {
+	case 0:
+	case 1:
+		args[3] = pos[0]
+	case 2:
+		args[3] = pos[0]
+		args[4] = pos[1]
+	default:
+		panic("too many arguments")
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Decr(key string) *IntCmd {
+	cmd := NewIntCmd("decr", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd {
+	cmd := NewIntCmd("decrby", key, decrement)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c *cmdable) Get(key string) *StringCmd {
+	cmd := NewStringCmd("get", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GetBit(key string, offset int64) *IntCmd {
+	cmd := NewIntCmd("getbit", key, offset)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GetRange(key string, start, end int64) *StringCmd {
+	cmd := NewStringCmd("getrange", key, start, end)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GetSet(key string, value interface{}) *StringCmd {
+	cmd := NewStringCmd("getset", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Incr(key string) *IntCmd {
+	cmd := NewIntCmd("incr", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) IncrBy(key string, value int64) *IntCmd {
+	cmd := NewIntCmd("incrby", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd {
+	cmd := NewFloatCmd("incrbyfloat", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) MGet(keys ...string) *SliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "mget"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd {
+	args := make([]interface{}, 1, 1+len(pairs))
+	args[0] = "mset"
+	args = appendArgs(args, pairs)
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd {
+	args := make([]interface{}, 1, 1+len(pairs))
+	args[0] = "msetnx"
+	args = appendArgs(args, pairs)
+	cmd := NewBoolCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+//
+// Use expiration for `SETEX`-like behavior.
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd {
+	args := make([]interface{}, 3, 4)
+	args[0] = "set"
+	args[1] = key
+	args[2] = value
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(expiration))
+		} else {
+			args = append(args, "ex", formatSec(expiration))
+		}
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
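So a whole-second TTL is sent as EX, anything finer as PX, and zero skips the expiration arguments entirely. A sketch of the three cases (editorial example):

	package main

	import (
		"time"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		client.Set("a", "1", 0)                     // SET a 1           (no TTL)
		client.Set("b", "2", 10*time.Second)        // SET b 2 ex 10
		client.Set("c", "3", 1500*time.Millisecond) // SET c 3 px 1500
	}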
+func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd {
+	cmd := NewIntCmd(
+		"setbit",
+		key,
+		offset,
+		value,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	if expiration == 0 {
+		// Use old `SETNX` to support old Redis versions.
+		cmd = NewBoolCmd("setnx", key, value)
+	} else {
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx")
+		} else {
+			cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx")
+		}
+	}
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	if expiration == 0 {
+		cmd = NewBoolCmd("set", key, value, "xx")
+	} else {
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx")
+		} else {
+			cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx")
+		}
+	}
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd {
+	cmd := NewIntCmd("setrange", key, offset, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) StrLen(key string) *IntCmd {
+	cmd := NewIntCmd("strlen", key)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) HDel(key string, fields ...string) *IntCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hdel"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HExists(key, field string) *BoolCmd {
+	cmd := NewBoolCmd("hexists", key, field)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HGet(key, field string) *StringCmd {
+	cmd := NewStringCmd("hget", key, field)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HGetAll(key string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd("hgetall", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd {
+	cmd := NewIntCmd("hincrby", key, field, incr)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+	cmd := NewFloatCmd("hincrbyfloat", key, field, incr)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HKeys(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("hkeys", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HLen(key string) *IntCmd {
+	cmd := NewIntCmd("hlen", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hmget"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd {
+	args := make([]interface{}, 2+len(fields)*2)
+	args[0] = "hmset"
+	args[1] = key
+	i := 2
+	for k, v := range fields {
+		args[i] = k
+		args[i+1] = v
+		i += 2
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd("hset", key, field, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd("hsetnx", key, field, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HVals(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("hvals", key)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "blpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(timeout)
+	cmd := NewStringSliceCmd(args...)
+	cmd.setReadTimeout(timeout)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "brpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(keys)+1] = formatSec(timeout)
+	cmd := NewStringSliceCmd(args...)
+	cmd.setReadTimeout(timeout)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd {
+	cmd := NewStringCmd(
+		"brpoplpush",
+		source,
+		destination,
+		formatSec(timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	c.process(cmd)
+	return cmd
+}
+
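The blocking variants pass the timeout twice: as the command argument (via formatSec) and to setReadTimeout, which the readTimeout helper earlier pads by 10 seconds, presumably so the socket deadline outlives the server-side block. A usage sketch (editorial example, assuming another client pushes to the list):

	package main

	import (
		"fmt"
		"time"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		// Blocks up to 5s; the reply is [key, value], or redis.Nil on timeout.
		kv, err := client.BLPop(5*time.Second, "jobs").Result()
		if err != nil {
			fmt.Println("no job:", err)
			return
		}
		fmt.Println("popped", kv[1], "from", kv[0])
	}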
+func (c *cmdable) LIndex(key string, index int64) *StringCmd {
+	cmd := NewStringCmd("lindex", key, index)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, op, pivot, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, "before", pivot, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, "after", pivot, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LLen(key string) *IntCmd {
+	cmd := NewIntCmd("llen", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LPop(key string) *StringCmd {
+	cmd := NewStringCmd("lpop", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LPushX(key string, value interface{}) *IntCmd {
+	cmd := NewIntCmd("lpushx", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(
+		"lrange",
+		key,
+		start,
+		stop,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd {
+	cmd := NewIntCmd("lrem", key, count, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd {
+	cmd := NewStatusCmd("lset", key, index, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd {
+	cmd := NewStatusCmd(
+		"ltrim",
+		key,
+		start,
+		stop,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPop(key string) *StringCmd {
+	cmd := NewStringCmd("rpop", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPopLPush(source, destination string) *StringCmd {
+	cmd := NewStringCmd("rpoplpush", source, destination)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPushX(key string, value interface{}) *IntCmd {
+	cmd := NewIntCmd("rpushx", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "sadd"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SCard(key string) *IntCmd {
+	cmd := NewIntCmd("scard", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SDiff(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sdiff"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sdiffstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SInter(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sinter"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sinterstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd("sismember", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice
+func (c *cmdable) SMembers(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("smembers", key)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map
+func (c *cmdable) SMembersMap(key string) *StringStructMapCmd {
+	cmd := NewStringStructMapCmd("smembers", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd("smove", source, destination, member)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SPOP key` command.
+func (c *cmdable) SPop(key string) *StringCmd {
+	cmd := NewStringCmd("spop", key)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("spop", key, count)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c *cmdable) SRandMember(key string) *StringCmd {
+	cmd := NewStringCmd("srandmember", key)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("srandmember", key, count)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "srem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SUnion(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sunion"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sunionstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type XAddExt struct {
+	Stream       string
+	MaxLen       int64 // MAXLEN N
+	MaxLenApprox int64 // MAXLEN ~ N
+	ID           string
+	Values       map[string]interface{}
+}
+
+func (c *cmdable) XAddExt(opt *XAddExt) *StringCmd {
+	a := make([]interface{}, 0, 6+len(opt.Values)*2)
+	a = append(a, "xadd")
+	a = append(a, opt.Stream)
+	if opt.MaxLen > 0 {
+		a = append(a, "maxlen", opt.MaxLen)
+	} else if opt.MaxLenApprox > 0 {
+		a = append(a, "maxlen", "~", opt.MaxLenApprox)
+	}
+	if opt.ID != "" {
+		a = append(a, opt.ID)
+	} else {
+		a = append(a, "*")
+	}
+	for k, v := range opt.Values {
+		a = append(a, k)
+		a = append(a, v)
+	}
+
+	cmd := NewStringCmd(a...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) XAdd(stream, id string, values map[string]interface{}) *StringCmd {
+	return c.XAddExt(&XAddExt{
+		Stream: stream,
+		ID:     id,
+		Values: values,
+	})
+}
+
+func (c *cmdable) XLen(key string) *IntCmd {
+	cmd := NewIntCmd("xlen", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) XRange(stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrange", stream, start, stop)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count)
+	c.process(cmd)
+	return cmd
+}
+
+type XReadExt struct {
+	Streams []string
+	Count   int64
+	Block   time.Duration
+}
+
+func (c *cmdable) XReadExt(opt *XReadExt) *XStreamSliceCmd {
+	a := make([]interface{}, 0, 5+len(opt.Streams))
+	a = append(a, "xread")
+	if opt != nil {
+		if opt.Count > 0 {
+			a = append(a, "count")
+			a = append(a, opt.Count)
+		}
+		if opt.Block >= 0 {
+			a = append(a, "block")
+			a = append(a, int64(opt.Block/time.Millisecond))
+		}
+	}
+	a = append(a, "streams")
+	for _, s := range opt.Streams {
+		a = append(a, s)
+	}
+
+	cmd := NewXStreamSliceCmd(a...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) XRead(streams ...string) *XStreamSliceCmd {
+	return c.XReadExt(&XReadExt{
+		Streams: streams,
+		Block:   -1,
+	})
+}
+
+func (c *cmdable) XReadN(count int64, streams ...string) *XStreamSliceCmd {
+	return c.XReadExt(&XReadExt{
+		Streams: streams,
+		Count:   count,
+		Block:   -1,
+	})
+}
+
+func (c *cmdable) XReadBlock(block time.Duration, streams ...string) *XStreamSliceCmd {
+	return c.XReadExt(&XReadExt{
+		Streams: streams,
+		Block:   block,
+	})
+}
+
+//------------------------------------------------------------------------------
+
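XRead and XReadN pass Block: -1, so the `opt.Block >= 0` check above skips the BLOCK argument; only XReadBlock opts in. A sketch pairing XAdd with a blocking read (editorial example; the streams list interleaves keys and start IDs):

	package main

	import (
		"fmt"
		"time"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		client.XAdd("events", "*", map[string]interface{}{"n": 1})
		// Emits: XREAD BLOCK 2000 STREAMS events 0
		streams, err := client.XReadBlock(2*time.Second, "events", "0").Result()
		if err != nil {
			panic(err)
		}
		for _, s := range streams {
			fmt.Println(s.Stream, len(s.Messages))
		}
	}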
+// Z represents sorted set member.
+type Z struct {
+	Score  float64
+	Member interface{}
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+	Weights []float64
+	// Can be SUM, MIN or MAX.
+	Aggregate string
+}
+
+func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewIntCmd(a...)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd {
+	const n = 2
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1] = "zadd", key
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "nx"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "xx"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "ch"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+	return c.zAdd(a, n, members...)
+}
+
+func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewFloatCmd(a...)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c *cmdable) ZIncr(key string, member Z) *FloatCmd {
+	const n = 3
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2] = "zadd", key, "incr"
+	return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+	return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+	return c.zIncr(a, n, member)
+}
+
+func (c *cmdable) ZCard(key string) *IntCmd {
+	cmd := NewIntCmd("zcard", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZCount(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zcount", key, min, max)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZLexCount(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zlexcount", key, min, max)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+	cmd := NewFloatCmd("zincrby", key, increment, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zinterstore"
+	args[1] = destination
+	args[2] = len(keys)
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
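The zAdd helper interleaves scores and members after the fixed prefix, so the NX/XX/CH variants share one body and differ only in the prefix length n. A usage sketch (editorial example):

	package main

	import (
		"fmt"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		// Emits: ZADD board 42 alice 17 bob
		added, err := client.ZAdd("board",
			redis.Z{Score: 42, Member: "alice"},
			redis.Z{Score: 17, Member: "bob"},
		).Result()
		if err != nil {
			panic(err)
		}
		fmt.Println(added) // 2
	}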
+func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+	args := []interface{}{
+		"zrange",
+		key,
+		start,
+		stop,
+	}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd {
+	return c.zRange(key, start, stop, false)
+}
+
+func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd("zrange", key, start, stop, "withscores")
+	c.process(cmd)
+	return cmd
+}
+
+type ZRangeBy struct {
+	Min, Max      string
+	Offset, Count int64
+}
+
+func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Min, opt.Max}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy("zrangebyscore", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy("zrangebylex", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRank(key, member string) *IntCmd {
+	cmd := NewIntCmd("zrank", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "zrem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+	cmd := NewIntCmd(
+		"zremrangebyrank",
+		key,
+		start,
+		stop,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zremrangebyscore", key, min, max)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zremrangebylex", key, min, max)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("zrevrange", key, start, stop)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Max, opt.Min}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy("zrevrangebyscore", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy("zrevrangebylex", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRank(key, member string) *IntCmd {
+	cmd := NewIntCmd("zrevrank", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZScore(key, member string) *FloatCmd {
+	cmd := NewFloatCmd("zscore", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zunionstore"
+	args[1] = dest
+	args[2] = len(keys)
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	args = appendArgs(args, els)
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PFCount(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BgRewriteAOF() *StatusCmd {
+	cmd := NewStatusCmd("bgrewriteaof")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BgSave() *StatusCmd {
+	cmd := NewStatusCmd("bgsave")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClientKill(ipPort string) *StatusCmd {
+	cmd := NewStatusCmd("client", "kill", ipPort)
+	c.process(cmd)
+	return cmd
+}
+
+// ClientKillByFilter is new style syntax, while the ClientKill is old
+// CLIENT KILL