diff --git a/.custom-gcl.yml b/.custom-gcl.yml new file mode 100644 index 000000000..926af7441 --- /dev/null +++ b/.custom-gcl.yml @@ -0,0 +1,4 @@ +version: v1.64.6 +plugins: + - module: 'github.com/lightninglabs/lightning-terminal/tools/linters' + path: ./tools/linters diff --git a/.golangci.yml b/.golangci.yml index 8e0071984..17f66266e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -14,6 +14,17 @@ run: - dev linters-settings: + custom: + ll: + type: "module" + description: "Custom lll linter with 'S' log line exclusion." + settings: + # Max line length, lines longer will be reported. + line-length: 80 + # Tab width in spaces. + tab-width: 8 + # The regex that we will use to detect the start of an `S` log line. + log-regex: "^\\s*.*(L|l)og\\.(Info|Debug|Trace|Warn|Error|Critical)S\\(" govet: # Don't report about shadowed variables check-shadowing: false @@ -39,7 +50,7 @@ linters-settings: linters: enable: - - lll + - ll - gofmt - tagliatelle - whitespace diff --git a/Makefile b/Makefile index 4c89d813e..29cd76f94 100644 --- a/Makefile +++ b/Makefile @@ -304,7 +304,7 @@ check-go-version: check-go-version-dockerfile check-go-version-yaml lint: check-go-version docker-tools @$(call print, "Linting source.") - $(DOCKER_TOOLS) golangci-lint run -v $(LINT_WORKERS) + $(DOCKER_TOOLS) custom-gcl run -v $(LINT_WORKERS) mod: @$(call print, "Tidying modules.") diff --git a/accounts/checkers.go b/accounts/checkers.go index 0b99bd68a..ac734cd0c 100644 --- a/accounts/checkers.go +++ b/accounts/checkers.go @@ -110,6 +110,7 @@ func NewAccountChecker(service Service, ) } + // nolint:ll checkers := CheckerMap{ // Invoices: "/lnrpc.Lightning/AddInvoice": mid.NewResponseRewriter( diff --git a/accounts/checkers_test.go b/accounts/checkers_test.go index 0e6ef37ab..8289d52e0 100644 --- a/accounts/checkers_test.go +++ b/accounts/checkers_test.go @@ -379,6 +379,7 @@ func TestAccountCheckers(t *testing.T) { originalRequest: &lnrpc.PendingChannelsRequest{}, originalResponse: &lnrpc.PendingChannelsResponse{ TotalLimboBalance: 123456, + // nolint:ll PendingOpenChannels: []*lnrpc.PendingChannelsResponse_PendingOpenChannel{ {}, }, diff --git a/accounts/store_sql.go b/accounts/store_sql.go index 2fc6d2293..8656589a1 100644 --- a/accounts/store_sql.go +++ b/accounts/store_sql.go @@ -31,7 +31,7 @@ const ( // SQLQueries is a subset of the sqlc.Queries interface that can be used // to interact with accounts related tables. // -//nolint:lll +//nolint:ll type SQLQueries interface { AddAccountInvoice(ctx context.Context, arg sqlc.AddAccountInvoiceParams) error DeleteAccount(ctx context.Context, id int64) error diff --git a/accounts/tlv.go b/accounts/tlv.go index 87cd09e23..71cd5a0c5 100644 --- a/accounts/tlv.go +++ b/accounts/tlv.go @@ -227,7 +227,9 @@ func PaymentEntryMapEncoder(w io.Writer, val any, buf *[8]byte) error { } // PaymentEntryMapDecoder decodes a map of payment entries. -func PaymentEntryMapDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { +func PaymentEntryMapDecoder(r io.Reader, val any, buf *[8]byte, + _ uint64) error { + if typ, ok := val.(*AccountPayments); ok { numItems, err := tlv.ReadVarInt(r, buf) if err != nil { diff --git a/autopilotserver/client.go b/autopilotserver/client.go index f2327f7ca..b4ac90ba3 100644 --- a/autopilotserver/client.go +++ b/autopilotserver/client.go @@ -27,7 +27,7 @@ var ErrVersionIncompatible = fmt.Errorf("litd version is not compatible " + // Config holds the configuration options for the autopilot server client. 
// -//nolint:lll +//nolint:ll type Config struct { // Disable will disable the autopilot client. Disable bool `long:"disable" description:"disable the autopilot client"` diff --git a/autopilotserver/mock/server.go b/autopilotserver/mock/server.go index 2c7eb2b4c..4aace89b3 100644 --- a/autopilotserver/mock/server.go +++ b/autopilotserver/mock/server.go @@ -310,7 +310,8 @@ func (m *Server) GetPrivacyFlags(remoteKey *btcec.PublicKey) ( key := hex.EncodeToString(remoteKey.SerializeCompressed()) sess, ok := m.sessions[key] if !ok { - return session.PrivacyFlags{}, fmt.Errorf("no such client found") + return session.PrivacyFlags{}, + fmt.Errorf("no such client found") } privacyFlags, err := session.Deserialize(sess.privacyFlags) @@ -436,7 +437,9 @@ func rulesToRPC(rulesMap map[string]*RuleRanges) ( return res, nil } -func permissionsToRPC(ps map[string][]bakery.Op) []*autopilotserverrpc.Permissions { +func permissionsToRPC( + ps map[string][]bakery.Op) []*autopilotserverrpc.Permissions { + res := make([]*autopilotserverrpc.Permissions, len(ps)) for method, ops := range ps { diff --git a/cmd/litcli/accounts.go b/cmd/litcli/accounts.go index 7816879d2..a424478d9 100644 --- a/cmd/litcli/accounts.go +++ b/cmd/litcli/accounts.go @@ -146,7 +146,8 @@ var updateAccountCommand = cli.Command{ Name: "update", ShortName: "u", Usage: "Update an existing off-chain account.", - ArgsUsage: "[id | label] new_balance [new_expiration_date] [--save_to=]", + ArgsUsage: "[id | label] new_balance [new_expiration_date] " + + "[--save_to=]", Description: "Updates an existing off-chain account and sets " + "either a new balance or new expiration date or both.", Flags: []cli.Flag{ diff --git a/cmd/litcli/actions.go b/cmd/litcli/actions.go index 2d37e7e52..49806a689 100644 --- a/cmd/litcli/actions.go +++ b/cmd/litcli/actions.go @@ -53,7 +53,8 @@ var listActionsCommand = cli.Command{ Name: "state", Usage: "The action state to filter on. If not set, " + "then actions of any state will be returned. " + - "Options include: 'pending', 'done' and 'error'.", + "Options include: 'pending', 'done' and " + + "'error'.", }, cli.Uint64Flag{ Name: "index_offset", diff --git a/cmd/litcli/autopilot.go b/cmd/litcli/autopilot.go index 1c5b552bf..491f13195 100644 --- a/cmd/litcli/autopilot.go +++ b/cmd/litcli/autopilot.go @@ -131,11 +131,11 @@ var addAutopilotSessionCmd = cli.Command{ }, cli.StringFlag{ Name: "privacy-flags", - Usage: "String representation of privacy flags to set " + - "for the session. Each individual flag will " + - "remove privacy from certain aspects of " + - "messages transmitted to autopilot. " + - "The strongest privacy is on by " + + Usage: "String representation of privacy flags " + + "to set for the session. Each individual " + + "flag will remove privacy from certain " + + "aspects of messages transmitted to " + + "autopilot. The strongest privacy is on by " + "default and an empty string means full " + "privacy. 
Some features may not be able to " + "run correctly with full privacy, see the " + diff --git a/cmd/litcli/proxy.go b/cmd/litcli/proxy.go index 2ae302ad3..0633903d8 100644 --- a/cmd/litcli/proxy.go +++ b/cmd/litcli/proxy.go @@ -17,8 +17,8 @@ var litCommands = []cli.Command{ Name: "bakesupermacaroon", Usage: "Bake a new super macaroon with all of LiT's active " + "permissions", - Description: "Bake a new super macaroon with all of LiT's active " + - "permissions.", + Description: "Bake a new super macaroon with all of LiT's " + + "active permissions.", Category: "LiT", Action: bakeSuperMacaroon, Flags: []cli.Flag{ @@ -47,8 +47,8 @@ var litCommands = []cli.Command{ Name: "getinfo", Usage: "Returns basic information related to the active " + "daemon", - Description: "Returns basic information related to the active " + - "daemon.", + Description: "Returns basic information related to the " + + "active daemon.", Category: "LiT", Action: getInfo, }, diff --git a/cmd/litcli/sessions.go b/cmd/litcli/sessions.go index c21c69c97..e6d1c8f2b 100644 --- a/cmd/litcli/sessions.go +++ b/cmd/litcli/sessions.go @@ -121,17 +121,16 @@ func addSession(cli *cli.Context) error { sessionExpiry := time.Now().Add(sessionLength).Unix() ctx := getContext() - resp, err := client.AddSession( - ctx, &litrpc.AddSessionRequest{ - Label: cli.String("label"), - SessionType: sessType, - ExpiryTimestampSeconds: uint64(sessionExpiry), - MailboxServerAddr: cli.String("mailboxserveraddr"), - DevServer: cli.Bool("devserver"), - MacaroonCustomPermissions: macPerms, - AccountId: cli.String("account_id"), - }, - ) + req := litrpc.AddSessionRequest{ + Label: cli.String("label"), + SessionType: sessType, + ExpiryTimestampSeconds: uint64(sessionExpiry), + MailboxServerAddr: cli.String("mailboxserveraddr"), + DevServer: cli.Bool("devserver"), + MacaroonCustomPermissions: macPerms, + AccountId: cli.String("account_id"), + } + resp, err := client.AddSession(ctx, &req) if err != nil { return err } diff --git a/config.go b/config.go index 6b40d663e..32b180600 100644 --- a/config.go +++ b/config.go @@ -146,7 +146,7 @@ var ( // all config items of its enveloping subservers, each prefixed with their // daemon's short name. // -//nolint:lll +//nolint:ll type Config struct { ShowVersion bool `long:"version" description:"Display version information and exit."` @@ -284,6 +284,8 @@ func (c *Config) lndConnectParams() (string, lndclient.Network, string, // defaultConfig returns a configuration struct with all default values set. func defaultConfig() *Config { defaultLogCfg := build.DefaultLogConfig() + + // nolint:ll return &Config{ HTTPSListen: defaultHTTPSListen, TLSCertPath: DefaultTLSCertPath, @@ -539,10 +541,15 @@ func loadAndValidateConfig(interceptor signal.Interceptor) (*Config, error) { // the remote connection as well. 
defaultFaradayCfg := faraday.DefaultConfig() if cfg.faradayRemote && cfg.Network != DefaultNetwork { - if cfg.Remote.Faraday.MacaroonPath == defaultFaradayCfg.MacaroonPath { - cfg.Remote.Faraday.MacaroonPath = cfg.Faraday.MacaroonPath + if cfg.Remote.Faraday.MacaroonPath == + defaultFaradayCfg.MacaroonPath { + + cfg.Remote.Faraday.MacaroonPath = + cfg.Faraday.MacaroonPath } - if cfg.Remote.Faraday.TLSCertPath == defaultFaradayCfg.TLSCertPath { + if cfg.Remote.Faraday.TLSCertPath == + defaultFaradayCfg.TLSCertPath { + cfg.Remote.Faraday.TLSCertPath = cfg.Faraday.TLSCertPath } } @@ -553,9 +560,10 @@ func loadAndValidateConfig(interceptor signal.Interceptor) (*Config, error) { cfg.faradayRpcConfig.MacaroonPath = cfg.Faraday.MacaroonPath if cfg.Faraday.ChainConn { - cfg.faradayRpcConfig.BitcoinClient, err = chain.NewBitcoinClient( - cfg.Faraday.Bitcoin, - ) + cfg.faradayRpcConfig.BitcoinClient, err = + chain.NewBitcoinClient( + cfg.Faraday.Bitcoin, + ) if err != nil { return nil, err } @@ -583,13 +591,16 @@ func loadAndValidateConfig(interceptor signal.Interceptor) (*Config, error) { defaultTapCfg := tapcfg.DefaultConfig() if cfg.tapRemote && cfg.Network != DefaultNetwork { - if cfg.Remote.TaprootAssets.MacaroonPath == defaultTapCfg.RpcConf.MacaroonPath { + if cfg.Remote.TaprootAssets.MacaroonPath == + defaultTapCfg.RpcConf.MacaroonPath { + macaroonPath := cfg.TaprootAssets.RpcConf.MacaroonPath cfg.Remote.TaprootAssets.MacaroonPath = macaroonPath } - if cfg.Remote.TaprootAssets.TLSCertPath == defaultTapCfg.RpcConf.TLSCertPath { - tlsCertPath := cfg.TaprootAssets.RpcConf.TLSCertPath + if cfg.Remote.TaprootAssets.TLSCertPath == + defaultTapCfg.RpcConf.TLSCertPath { + tlsCertPath := cfg.TaprootAssets.RpcConf.TLSCertPath cfg.Remote.TaprootAssets.TLSCertPath = tlsCertPath } } diff --git a/config_dev.go b/config_dev.go index 90b8b290f..0d51550c5 100644 --- a/config_dev.go +++ b/config_dev.go @@ -39,7 +39,7 @@ var defaultSqliteDatabasePath = filepath.Join( // features not yet available in production. Since our itests are built with // the dev tag, we can test these features in our itests. // -// nolint:lll +// nolint:ll type DevConfig struct { // DatabaseBackend is the database backend we will use for storing all // account related data. While this feature is still in development, we diff --git a/db/postgres.go b/db/postgres.go index 16e41dc09..2065d9f51 100644 --- a/db/postgres.go +++ b/db/postgres.go @@ -44,7 +44,7 @@ var ( // PostgresConfig holds the postgres database configuration. // -// nolint:lll +// nolint:ll type PostgresConfig struct { SkipMigrations bool `long:"skipmigrations" description:"Skip applying migrations on startup."` Host string `long:"host" description:"Database server hostname."` diff --git a/db/sqlite.go b/db/sqlite.go index 803362fa8..4a1339fcb 100644 --- a/db/sqlite.go +++ b/db/sqlite.go @@ -47,7 +47,7 @@ var ( // SqliteConfig holds all the config arguments needed to interact with our // sqlite DB. // -// nolint: lll +// nolint:ll type SqliteConfig struct { // SkipMigrations if true, then all the tables will be created on start // up if they don't already exist. diff --git a/firewall/caveats_test.go b/firewall/caveats_test.go index 9d8e4b81d..6d0cf117a 100644 --- a/firewall/caveats_test.go +++ b/firewall/caveats_test.go @@ -25,10 +25,11 @@ const ( // formatted as a caveat and then parsed again successfully. 
func TestInterceptMetaInfo(t *testing.T) { info := &InterceptMetaInfo{ - ActorName: "autopilot", - Feature: "re-balance", - Trigger: "channel 7413345453234435345 depleted", - Intent: "increase outbound liquidity by 2000000 sats", + ActorName: "autopilot", + Feature: "re-balance", + Trigger: "channel 7413345453234435345 depleted", + Intent: "increase outbound liquidity by 2000000 " + + "sats", StructuredJsonData: "{}", } @@ -93,6 +94,7 @@ func TestParseMetaInfoCaveat(t *testing.T) { // caveat and then parsed again successfully. func TestInterceptRule(t *testing.T) { rules := &InterceptRules{ + // nolint:ll FeatureRules: map[string]map[string]string{ "AutoFees": { "first-hop-ignore-list": "03abcd...,02badb01...", diff --git a/firewall/config.go b/firewall/config.go index 0e4b21506..9b05315f7 100644 --- a/firewall/config.go +++ b/firewall/config.go @@ -2,14 +2,14 @@ package firewall // Config holds all config options for the firewall. // -//nolint:lll +//nolint:ll type Config struct { RequestLogger *RequestLoggerConfig `group:"request-logger" namespace:"request-logger" description:"request logger settings"` } // RequestLoggerConfig holds all the config options for the request logger. // -//nolint:lll +//nolint:ll type RequestLoggerConfig struct { RequestLoggerLevel RequestLoggerLevel `long:"level" description:"Set the request logger level. Options include 'all', 'full' and 'interceptor''"` } diff --git a/firewall/privacy_mapper.go b/firewall/privacy_mapper.go index 49aaf20f2..8049595dc 100644 --- a/firewall/privacy_mapper.go +++ b/firewall/privacy_mapper.go @@ -338,7 +338,8 @@ func handleGetInfoResponse(db firewalldb.PrivacyMapDB, tx firewalldb.PrivacyMapTx) error { var err error - pseudoPubKey, err = firewalldb.HideString( //nolint:lll + // nolint:ll + pseudoPubKey, err = firewalldb.HideString( ctx, tx, r.IdentityPubkey, ) @@ -444,7 +445,8 @@ func handleFwdHistoryResponse(db firewalldb.PrivacyMapDB, timestamp := time.Unix(0, int64(fe.TimestampNs)) if !flags.Contains(session.ClearTimeStamps) { - // We randomize the forwarding timestamp. + // We randomize the forwarding + // timestamp. timestamp, err = hideTimestamp( randIntn, timeVariation, timestamp, @@ -511,9 +513,10 @@ func handleFeeReportResponse(db firewalldb.PrivacyMapDB, chanPoint := c.ChannelPoint if !flags.Contains(session.ClearChanIDs) { - chanPoint, err = firewalldb.HideChanPointStr( - ctx, tx, chanPoint, - ) + chanPoint, err = + firewalldb.HideChanPointStr( + ctx, tx, chanPoint, + ) if err != nil { return err } @@ -611,9 +614,10 @@ func handleListChannelsResponse(db firewalldb.PrivacyMapDB, chanPoint := c.ChannelPoint chanID := c.ChanId if hideChanIds { - chanPoint, err = firewalldb.HideChanPointStr( - ctx, tx, c.ChannelPoint, - ) + chanPoint, err = + firewalldb.HideChanPointStr( + ctx, tx, c.ChannelPoint, + ) if err != nil { return err } @@ -660,7 +664,8 @@ func handleListChannelsResponse(db firewalldb.PrivacyMapDB, if !flags.Contains(session.ClearAmounts) { // We adapt the remote balance // accordingly. - remoteBalance = c.Capacity - localBalance + remoteBalance = + c.Capacity - localBalance } // We hide the total sats sent and received. @@ -698,7 +703,7 @@ func handleListChannelsResponse(db firewalldb.PrivacyMapDB, return err } - //nolint:lll + //nolint:ll channels[i] = &lnrpc.Channel{ // Items we adjust. 
RemotePubkey: remotePub, @@ -780,7 +785,8 @@ func handleUpdatePolicyRequest(db firewalldb.PrivacyMapDB, tx firewalldb.PrivacyMapTx) error { var err error - newTxid, newIndex, err = firewalldb.RevealChanPoint( //nolint:lll + // nolint:ll + newTxid, newIndex, err = firewalldb.RevealChanPoint( ctx, tx, newTxid, newIndex, ) return err @@ -986,9 +992,10 @@ func handleClosedChannelsResponse(db firewalldb.PrivacyMapDB, channelPoint := c.ChannelPoint if !flags.Contains(session.ClearChanIDs) { - channelPoint, err = firewalldb.HideChanPointStr( - ctx, tx, c.ChannelPoint, - ) + channelPoint, err = + firewalldb.HideChanPointStr( + ctx, tx, c.ChannelPoint, + ) if err != nil { return err } @@ -1006,9 +1013,11 @@ func handleClosedChannelsResponse(db firewalldb.PrivacyMapDB, closingTxid := c.ClosingTxHash if !flags.Contains(session.ClearClosingTxIds) { - closingTxid, err = firewalldb.HideString( - ctx, tx, c.ClosingTxHash, - ) + closingTxid, err = + firewalldb.HideString( + ctx, tx, + c.ClosingTxHash, + ) if err != nil { return err } @@ -1172,6 +1181,7 @@ func handlePendingChannelsResponse(db firewalldb.PrivacyMapDB, return err } + // nolint:ll pendingOpen := lnrpc.PendingChannelsResponse_PendingOpenChannel{ // Non-obfuscated fields. CommitFee: c.CommitFee, @@ -1198,7 +1208,8 @@ func handlePendingChannelsResponse(db firewalldb.PrivacyMapDB, closingTxid := c.ClosingTxid if !flags.Contains(session.ClearClosingTxIds) { - closingTxid, err = firewalldb.HideString( //nolint:lll + // nolint:ll + closingTxid, err = firewalldb.HideString( ctx, tx, c.ClosingTxid, ) if err != nil { @@ -1206,6 +1217,7 @@ func handlePendingChannelsResponse(db firewalldb.PrivacyMapDB, } } + // nolint:ll pendingClose := lnrpc.PendingChannelsResponse_ClosedChannel{ // Obfuscated fields. ClosingTxid: closingTxid, @@ -1227,9 +1239,10 @@ func handlePendingChannelsResponse(db firewalldb.PrivacyMapDB, closingTxid := c.ClosingTxid if !flags.Contains(session.ClearClosingTxIds) { - closingTxid, err = firewalldb.HideString( - ctx, tx, c.ClosingTxid, - ) + closingTxid, err = + firewalldb.HideString( + ctx, tx, c.ClosingTxid, + ) if err != nil { return err } @@ -1257,6 +1270,7 @@ func handlePendingChannelsResponse(db firewalldb.PrivacyMapDB, limboBalance = pendingChannel.Capacity } + // nolint:ll pendingForceClose := lnrpc.PendingChannelsResponse_ForceClosedChannel{ // Obfuscated fields. ClosingTxid: closingTxid, @@ -1299,9 +1313,10 @@ func handlePendingChannelsResponse(db firewalldb.PrivacyMapDB, closingTxid := c.ClosingTxid if !flags.Contains(session.ClearClosingTxIds) { - closingTxid, err = firewalldb.HideString( - ctx, tx, closingTxid, - ) + closingTxid, err = + firewalldb.HideString( + ctx, tx, closingTxid, + ) if err != nil { return err } @@ -1316,14 +1331,16 @@ func handlePendingChannelsResponse(db firewalldb.PrivacyMapDB, session.ClearClosingTxIds, ) { - closingTxHex, err = firewalldb.HideString( - ctx, tx, closingTxHex, - ) + closingTxHex, err = + firewalldb.HideString( + ctx, tx, closingTxHex, + ) if err != nil { return err } } + // nolint:ll waitingCloseChannel := lnrpc.PendingChannelsResponse_WaitingCloseChannel{ Channel: pendingChannel, LimboBalance: limboBalance, @@ -1381,14 +1398,16 @@ func handleBatchOpenChannelRequest(db firewalldb.PrivacyMapDB, // GetInfo or the like. 
nodePubkey := c.NodePubkey if !flags.Contains(session.ClearPubkeys) { - nodePubkey, err = firewalldb.RevealBytes( - ctx, tx, c.NodePubkey, - ) + nodePubkey, err = + firewalldb.RevealBytes( + ctx, tx, c.NodePubkey, + ) if err != nil { return err } } + // nolint:ll reqs[i] = &lnrpc.BatchOpenChannel{ // Obfuscated fields. NodePubkey: nodePubkey, @@ -1457,7 +1476,8 @@ func handleBatchOpenChannelResponse(db firewalldb.PrivacyMapDB, return err } - txID, outIdx, err := firewalldb.HideChanPoint( //nolint:lll + // nolint:ll + txID, outIdx, err := firewalldb.HideChanPoint( ctx, tx, txId.String(), p.OutputIndex, ) @@ -1533,6 +1553,7 @@ func handleChannelOpenRequest(db firewalldb.PrivacyMapDB, return nil, err } + // nolint:ll return &lnrpc.OpenChannelRequest{ // Obfuscated fields. NodePubkey: nodePubkey, @@ -1625,6 +1646,7 @@ func handleChannelOpenResponse(db firewalldb.PrivacyMapDB, return nil, err } + // nolint:ll return &lnrpc.ChannelPoint{ FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ FundingTxidBytes: hash[:], diff --git a/firewall/privacy_mapper_test.go b/firewall/privacy_mapper_test.go index 61b24cd4c..53dcf6ebe 100644 --- a/firewall/privacy_mapper_test.go +++ b/firewall/privacy_mapper_test.go @@ -27,16 +27,20 @@ func TestPrivacyMapper(t *testing.T) { } // Define some transaction outpoints used for mapping. + // + // nolint:ll clearTxID := "abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd" clearTxIDReveresed, err := chainhash.NewHashFromStr(clearTxID) require.NoError(t, err) + // nolint:ll obfusTxID0 := "097ef666a61919ff3413b3b701eae3a5cbac08f70c0ca567806e1fa6acbfe384" require.NoError(t, err) obfusOut0 := uint32(2161781494) obfusTxID0Reversed, err := chainhash.NewHashFromStr(obfusTxID0) require.NoError(t, err) + // nolint:ll obfusTxID1 := "45ec471bfccb0b7b9a8bc4008248931c59ad994903e07b54f54821ea3ef5cc5c" obfusOut1 := uint32(1642614131) @@ -135,6 +139,7 @@ func TestPrivacyMapper(t *testing.T) { } ) + // nolint:ll tests := []struct { name string privacyFlags session.PrivacyFlags @@ -376,6 +381,7 @@ func TestPrivacyMapper(t *testing.T) { }, }, expectedReplacement: &lnrpc.PolicyUpdateResponse{ + // nolint:ll FailedUpdates: []*lnrpc.FailedUpdate{ { Outpoint: &lnrpc.OutPoint{ @@ -396,6 +402,7 @@ func TestPrivacyMapper(t *testing.T) { UnconfirmedBalance: 1_000_000, LockedBalance: 1_000_000, ReservedBalanceAnchorChan: 1_000_000, + // nolint:ll AccountBalance: map[string]*lnrpc.WalletAccountBalance{ "first": { ConfirmedBalance: 1_000_000, @@ -410,6 +417,7 @@ func TestPrivacyMapper(t *testing.T) { UnconfirmedBalance: 950_100, LockedBalance: 950_100, ReservedBalanceAnchorChan: 950_100, + // nolint:ll AccountBalance: map[string]*lnrpc.WalletAccountBalance{ "first": { ConfirmedBalance: 950_100, @@ -457,6 +465,7 @@ func TestPrivacyMapper(t *testing.T) { uri: "/lnrpc.Lightning/ClosedChannels", msgType: rpcperms.TypeResponse, msg: &lnrpc.ClosedChannelsResponse{ + // nolint:ll Channels: []*lnrpc.ChannelCloseSummary{ { ChannelPoint: outPoint( @@ -481,6 +490,7 @@ func TestPrivacyMapper(t *testing.T) { }, }, expectedReplacement: &lnrpc.ClosedChannelsResponse{ + // nolint:ll Channels: []*lnrpc.ChannelCloseSummary{ { ChannelPoint: outPoint( @@ -502,6 +512,7 @@ func TestPrivacyMapper(t *testing.T) { name: "ClosedChannels Response clear", uri: "/lnrpc.Lightning/ClosedChannels", msgType: rpcperms.TypeResponse, + // nolint:ll msg: &lnrpc.ClosedChannelsResponse{ Channels: []*lnrpc.ChannelCloseSummary{ { @@ -533,6 +544,7 @@ func TestPrivacyMapper(t *testing.T) { session.ClearAmounts, }, 
expectedReplacement: &lnrpc.ClosedChannelsResponse{ + // nolint:ll Channels: []*lnrpc.ChannelCloseSummary{ { ChannelPoint: outPoint( @@ -554,6 +566,7 @@ func TestPrivacyMapper(t *testing.T) { name: "PendingChannels Response", uri: "/lnrpc.Lightning/PendingChannels", msgType: rpcperms.TypeResponse, + // nolint:ll msg: &lnrpc.PendingChannelsResponse{ PendingOpenChannels: []*lnrpc.PendingChannelsResponse_PendingOpenChannel{ { @@ -595,6 +608,7 @@ func TestPrivacyMapper(t *testing.T) { }, }, }, + // nolint:ll expectedReplacement: &lnrpc.PendingChannelsResponse{ PendingOpenChannels: []*lnrpc.PendingChannelsResponse_PendingOpenChannel{ { @@ -636,6 +650,7 @@ func TestPrivacyMapper(t *testing.T) { uri: "/lnrpc.Lightning/PendingChannels", msgType: rpcperms.TypeResponse, msg: &lnrpc.PendingChannelsResponse{ + // nolint:ll PendingOpenChannels: []*lnrpc.PendingChannelsResponse_PendingOpenChannel{ { CommitFee: 123, @@ -658,6 +673,7 @@ func TestPrivacyMapper(t *testing.T) { session.ClearAmounts, session.ClearChanIDs, }, + // nolint:ll expectedReplacement: &lnrpc.PendingChannelsResponse{ PendingOpenChannels: []*lnrpc.PendingChannelsResponse_PendingOpenChannel{ { @@ -712,15 +728,16 @@ func TestPrivacyMapper(t *testing.T) { name: "BatchOpenChannel Response", uri: "/lnrpc.Lightning/BatchOpenChannel", msgType: rpcperms.TypeResponse, + // nolint:ll msg: &lnrpc.BatchOpenChannelResponse{ PendingChannels: []*lnrpc.PendingUpdate{ { - Txid: clearTxIDReveresed[:], OutputIndex: 0, }, }, }, + // nolint:ll expectedReplacement: &lnrpc.BatchOpenChannelResponse{ PendingChannels: []*lnrpc.PendingUpdate{ { @@ -1015,6 +1032,7 @@ func TestPrivacyMapper(t *testing.T) { session.AddToGRPCMetadata(md, sessionID) for i := 0; i < numSamples; i++ { + // nolint:ll interceptReq := &rpcperms.InterceptionRequest{ Type: rpcperms.TypeResponse, Macaroon: mac, @@ -1326,7 +1344,7 @@ func TestHideBool(t *testing.T) { // TestObfuscateConfig tests that we substitute substrings in the config // correctly. // -//nolint:lll +//nolint:ll func TestObfuscateConfig(t *testing.T) { tests := []struct { name string diff --git a/firewalldb/action_paginator.go b/firewalldb/action_paginator.go index d2bd3d2f6..8086733bb 100644 --- a/firewalldb/action_paginator.go +++ b/firewalldb/action_paginator.go @@ -222,6 +222,7 @@ func (p *actionPaginator) queryCountAll() ([]*Action, uint64, uint64, error) { totalCount++ + // nolint:ll if p.cfg.IndexOffset != 0 && binary.BigEndian.Uint64(indexKey) == p.cfg.IndexOffset+1 { diff --git a/firewalldb/actions.go b/firewalldb/actions.go index 9a4e350be..787887e27 100644 --- a/firewalldb/actions.go +++ b/firewalldb/actions.go @@ -318,9 +318,9 @@ func (s *groupActionsReadDB) ListActions(ctx context.Context) ([]*RuleAction, return actions, nil } -// groupFeatureActionsReadDB is an implementation of the rules.ActionsListDB that -// will provide read access to all the Actions of a feature within a particular -// group. +// groupFeatureActionsReadDB is an implementation of the rules.ActionsListDB +// that will provide read access to all the Actions of a feature within a +// particular group. 
type groupFeatureActionsReadDB struct { *allActionsReadDB } diff --git a/firewalldb/actions_kvdb.go b/firewalldb/actions_kvdb.go index adf58eb02..159c286e9 100644 --- a/firewalldb/actions_kvdb.go +++ b/firewalldb/actions_kvdb.go @@ -32,6 +32,7 @@ const ( typeLocatorActionID tlv.Type = 2 ) +// nolint:ll /* The Actions are stored in the following structure in the KV db: diff --git a/firewalldb/actions_sql.go b/firewalldb/actions_sql.go index cfa67c18a..355a450f2 100644 --- a/firewalldb/actions_sql.go +++ b/firewalldb/actions_sql.go @@ -26,7 +26,7 @@ type SQLAccountQueries interface { // SQLActionQueries is a subset of the sqlc.Queries interface that can be used // to interact with action related tables. // -//nolint:lll +//nolint:ll type SQLActionQueries interface { SQLSessionQueries SQLAccountQueries diff --git a/firewalldb/kvstores_kvdb.go b/firewalldb/kvstores_kvdb.go index 221730a6c..c75e66f9f 100644 --- a/firewalldb/kvstores_kvdb.go +++ b/firewalldb/kvstores_kvdb.go @@ -8,6 +8,7 @@ import ( "go.etcd.io/bbolt" ) +// nolint:ll /* The KVStores are stored in the following structure in the KV db. Note that the `perm` and `temp` buckets are identical in structure. The only difference @@ -347,9 +348,10 @@ func (s *kvStoreTx) getSessionFeatureRuleBucket(perm bool) getBucketFunc { } if create { - featureBucket, err := sessBucket.CreateBucketIfNotExists( - featureKVStoreBucketKey, - ) + featureBucket, err := + sessBucket.CreateBucketIfNotExists( + featureKVStoreBucketKey, + ) if err != nil { return nil, err } diff --git a/firewalldb/kvstores_sql.go b/firewalldb/kvstores_sql.go index 248892130..a205927aa 100644 --- a/firewalldb/kvstores_sql.go +++ b/firewalldb/kvstores_sql.go @@ -16,7 +16,7 @@ import ( // SQLKVStoreQueries is a subset of the sqlc.Queries interface that can be // used to interact with the kvstore tables. // -//nolint:lll +//nolint:ll type SQLKVStoreQueries interface { SQLSessionQueries diff --git a/firewalldb/privacy_mapper_sql.go b/firewalldb/privacy_mapper_sql.go index 8a4863a6c..4ed7a45ce 100644 --- a/firewalldb/privacy_mapper_sql.go +++ b/firewalldb/privacy_mapper_sql.go @@ -12,7 +12,7 @@ import ( // SQLPrivacyPairQueries is a subset of the sqlc.Queries interface that can be // used to interact with the privacy map table. // -//nolint:lll +//nolint:ll type SQLPrivacyPairQueries interface { SQLSessionQueries diff --git a/firewalldb/sql_migration.go b/firewalldb/sql_migration.go index 90b70142e..cc1f63029 100644 --- a/firewalldb/sql_migration.go +++ b/firewalldb/sql_migration.go @@ -894,6 +894,8 @@ func migrateActionsToSQL(ctx context.Context, kvStore *bbolt.DB, // Iterate over session ID buckets (i.e. what we should name // macaroon IDs). 
+ // + // nolint:ll return sessionsBucket.ForEach(func(macID []byte, v []byte) error { if v != nil { return fmt.Errorf("expected only sub-buckets " + @@ -984,7 +986,8 @@ func migrateActionsToSQL(ctx context.Context, kvStore *bbolt.DB, return fmt.Errorf("iterating over actions failed: %w", err) } - log.Infof("Finished iterating actions in KV store (no persistence yet).") + log.Infof("Finished iterating actions in KV store " + + "(no persistence yet).") return nil } @@ -1097,8 +1100,8 @@ func validateMigratedAction(ctx context.Context, sqlTx SQLQueries, ctx, insertParams.SessionID.Int64, ) if err != nil { - return fmt.Errorf("unable to get session with id %d: %w", - insertParams.SessionID.Int64, err) + return fmt.Errorf("unable to get session with id %d: "+ + "%w", insertParams.SessionID.Int64, err) } overriddenSessID = fn.Some(session.ID(sess.Alias)) @@ -1109,8 +1112,8 @@ func validateMigratedAction(ctx context.Context, sqlTx SQLQueries, ctx, insertParams.AccountID.Int64, ) if err != nil { - return fmt.Errorf("unable to get account with id %d: %w", - insertParams.AccountID.Int64, err) + return fmt.Errorf("unable to get account with id %d: "+ + "%w", insertParams.AccountID.Int64, err) } acctAlias, err := accounts.AccountIDFromInt64(acct.Alias) diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go index 2071b9d58..47df26da7 100644 --- a/firewalldb/sql_migration_test.go +++ b/firewalldb/sql_migration_test.go @@ -387,6 +387,8 @@ func TestFirewallDBMigration(t *testing.T) { // The tests slice contains all the tests that we will run for the // migration of the firewalldb from a BoltDB to a SQLDB. + // + // nolint:ll tests := []struct { name string populateDB func(t *testing.T, ctx context.Context, @@ -1863,6 +1865,7 @@ func randomActions(t *testing.T, ctx context.Context, boltDB *BoltDB, acctAlias, err := newAcctID.ToInt64() require.NoError(t, err) + // nolint:ll _, err = acctSqlStore.UpdateAccountAliasForTests( ctx, sqlc.UpdateAccountAliasForTestsParams{ Alias: acctAlias, @@ -2185,7 +2188,8 @@ func randomString(n int) string { func randomBytes(n int) []byte { b := make([]byte, n) for i := range b { - b[i] = byte(rand.Intn(256)) // Random int between 0-255, then cast to byte + // Random int between 0-255, then cast to byte. + b[i] = byte(rand.Intn(256)) } return b } diff --git a/gzip.go b/gzip.go index 7bd1a6a38..2099e67c8 100644 --- a/gzip.go +++ b/gzip.go @@ -21,7 +21,10 @@ func (w gzipResponseWriter) Write(b []byte) (int, error) { func makeGzipHandler(handler http.HandlerFunc) http.HandlerFunc { return func(resp http.ResponseWriter, req *http.Request) { // Check if the client can accept the gzip encoding. - if !strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") { + isGzipEncoding := strings.Contains( + req.Header.Get("Accept-Encoding"), "gzip", + ) + if !isGzipEncoding { // The client cannot accept it, so return the output // uncompressed. 
handler(resp, req) @@ -31,6 +34,9 @@ func makeGzipHandler(handler http.HandlerFunc) http.HandlerFunc { resp.Header().Set("Content-Encoding", "gzip") gzipWriter := gzip.NewWriter(resp) defer gzipWriter.Close() - handler(gzipResponseWriter{Writer: gzipWriter, ResponseWriter: resp}, req) + gzipRespWriter := gzipResponseWriter{ + Writer: gzipWriter, ResponseWriter: resp, + } + handler(gzipRespWriter, req) } } diff --git a/itest/assets_test.go b/itest/assets_test.go index 86ac326fb..7ff573aba 100644 --- a/itest/assets_test.go +++ b/itest/assets_test.go @@ -59,7 +59,7 @@ const ( DefaultPushSat int64 = 1062 ) -// nolint: lll +// nolint:ll var ( failureNoBalance = lnrpc.PaymentFailureReason_FAILURE_REASON_INSUFFICIENT_BALANCE failureNoRoute = lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE @@ -554,7 +554,8 @@ func createTestAssetNetwork(t *harnessTest, net *NetworkHarness, charlieTap, // After opening the channels, the asset balance of the funding nodes // should have been decreased with the funding amount. assertBalance( - t.t, charlieTap, charlieAssetBalance, itest.WithAssetID(assetID), + t.t, charlieTap, charlieAssetBalance, + itest.WithAssetID(assetID), ) assertBalance( t.t, daveTap, daveAssetBalance, itest.WithAssetID(assetID), @@ -3194,8 +3195,8 @@ func assertPendingForceCloseChannelAssetData(t *testing.T, node *HarnessNode, error) { if len(resp.PendingForceClosingChannels) == 0 { - return nil, fmt.Errorf("no pending force close " + - "channels found") + return nil, fmt.Errorf("no pending force " + + "close channels found") } for _, ch := range resp.PendingForceClosingChannels { @@ -3528,9 +3529,10 @@ func assertForceCloseSweeps(ctx context.Context, net *NetworkHarness, // We'll pause here and wait until the sweeper recognizes that we've // offered the second level sweep transaction. + // + // nolint:ll assertSweepExists( t.t, alice, - //nolint: lll walletrpc.WitnessType_TAPROOT_HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL, ) @@ -3642,9 +3644,9 @@ func assertForceCloseSweeps(ctx context.Context, net *NetworkHarness, // If we didn't yet sweep all HTLCs, then we need to wait for another // sweep. if numSweptHTLCs < numTimeoutHTLCs { + // nolint:ll assertSweepExists( t.t, bob, - // nolint: lll walletrpc.WitnessType_TAPROOT_HTLC_OFFERED_REMOTE_TIMEOUT, ) diff --git a/itest/litd_custom_channels_test.go b/itest/litd_custom_channels_test.go index cfa00e9ff..993aa0389 100644 --- a/itest/litd_custom_channels_test.go +++ b/itest/litd_custom_channels_test.go @@ -89,6 +89,7 @@ var ( "--taproot-assets.universerpccourier.maxbackoff=600ms", "--taproot-assets.custodianproofretrievaldelay=500ms", } + // nolint:ll litdArgsTemplate = append(litdArgsTemplateNoOracle, []string{ "--taproot-assets.experimental.rfq.priceoracleaddress=" + "use_mock_price_oracle_service_promise_to_" + @@ -98,6 +99,7 @@ var ( "--taproot-assets.experimental.rfq.acceptpricedeviationppm=50000", }...) + // nolint:ll litdArgsTemplateDiffOracle = append(litdArgsTemplateNoOracle, []string{ "--taproot-assets.experimental.rfq.priceoracleaddress=" + "use_mock_price_oracle_service_promise_to_" + @@ -2338,7 +2340,9 @@ func testCustomChannelsV1Upgrade(ctx context.Context, net *NetworkHarness, ) require.NoError(t.t, err) - charlie, err := net.NewNode(t.t, "Charlie", lndArgs, false, true, litdArgs...) + charlie, err := net.NewNode( + t.t, "Charlie", lndArgs, false, true, litdArgs..., + ) require.NoError(t.t, err) // Next we'll connect all the nodes and also fund them with some coins. 
diff --git a/itest/litd_firewall_test.go b/itest/litd_firewall_test.go index 5c82bd4b3..878559bbc 100644 --- a/itest/litd_firewall_test.go +++ b/itest/litd_firewall_test.go @@ -976,6 +976,7 @@ func testChannelOpening(net *NetworkHarness, ht *harnessTest, t *testing.T) { net.autopilotServer.SetFeatures(map[string]*mock.Feature{ "OpenChannels": { Description: "open channels while you sleep!", + // nolint:ll Rules: map[string]*mock.RuleRanges{ rules.OnChainBudgetName: onChainBudgetRule, rules.ChanConstraintName: chanConstraintsRule, @@ -1028,6 +1029,7 @@ func testChannelOpening(net *NetworkHarness, ht *harnessTest, t *testing.T) { MailboxServerAddr: mailboxServerAddr, Features: map[string]*litrpc.FeatureConfig{ "OpenChannels": { + // nolint:ll Rules: &litrpc.RulesMap{ Rules: map[string]*litrpc.RuleValue{ rules.ChanConstraintName: { @@ -1292,6 +1294,7 @@ func testChannelOpening(net *NetworkHarness, ht *harnessTest, t *testing.T) { MailboxServerAddr: mailboxServerAddr, Features: map[string]*litrpc.FeatureConfig{ "OpenChannels": { + // nolint:ll Rules: &litrpc.RulesMap{ Rules: map[string]*litrpc.RuleValue{ rules.OnChainBudgetName: { @@ -1474,6 +1477,7 @@ func testRateLimitAndPrivacyMapper(net *NetworkHarness, t *harnessTest) { time.Now().Add(5 * time.Minute).Unix(), ), MailboxServerAddr: mailboxServerAddr, + // nolint:ll Features: map[string]*litrpc.FeatureConfig{ "HealthCheck": { Rules: &litrpc.RulesMap{ @@ -1700,6 +1704,7 @@ func testHistoryLimitRule(net *NetworkHarness, t *harnessTest) { time.Now().Add(5 * time.Minute).Unix(), ), MailboxServerAddr: mailboxServerAddr, + // nolint:ll Features: map[string]*litrpc.FeatureConfig{ "AutoFees": { Rules: &litrpc.RulesMap{ @@ -1820,6 +1825,7 @@ func testChanPolicyBoundsRule(net *NetworkHarness, t *harnessTest) { net.autopilotServer.SetFeatures(map[string]*mock.Feature{ "AutoFees": { Description: "manages your channel fees", + // nolint:ll Rules: map[string]*mock.RuleRanges{ rules.ChanPolicyBoundsName: chanPolicyBoundsRule, }, @@ -1857,6 +1863,7 @@ func testChanPolicyBoundsRule(net *NetworkHarness, t *harnessTest) { Features: map[string]*litrpc.FeatureConfig{ "AutoFees": { Rules: &litrpc.RulesMap{ + // nolint:ll Rules: map[string]*litrpc.RuleValue{ rules.ChanPolicyBoundsName: { Value: policyBounds, @@ -2117,6 +2124,7 @@ func testPeerAndChannelRestrictRules(net *NetworkHarness, t *harnessTest) { MailboxServerAddr: mailboxServerAddr, Features: map[string]*litrpc.FeatureConfig{ "AutoFees": { + // nolint:ll Rules: &litrpc.RulesMap{ Rules: map[string]*litrpc.RuleValue{ rules.PeersRestrictName: { @@ -2385,24 +2393,23 @@ func testLargeHttpHeader(ctx context.Context, net *NetworkHarness, // Add a new Autopilot session that subscribes to a "Test", feature. // This call is expected to also result in Litd registering this session // with the mock autopilot server. 
- sessResp, err := litClient.AddAutopilotSession( - ctxm, &litrpc.AddAutopilotSessionRequest{ - Label: "integration-test", - ExpiryTimestampSeconds: uint64( - time.Now().Add(5 * time.Minute).Unix(), - ), - MailboxServerAddr: mailboxServerAddr, - Features: map[string]*litrpc.FeatureConfig{ - "TestFeature": { - Rules: &litrpc.RulesMap{ - Rules: map[string]*litrpc.RuleValue{}, - }, + req := litrpc.AddAutopilotSessionRequest{ + Label: "integration-test", + ExpiryTimestampSeconds: uint64( + time.Now().Add(5 * time.Minute).Unix(), + ), + MailboxServerAddr: mailboxServerAddr, + Features: map[string]*litrpc.FeatureConfig{ + "TestFeature": { + Rules: &litrpc.RulesMap{ + Rules: map[string]*litrpc.RuleValue{}, }, }, - // Switch the privacy mapper off for simplicity’s sake. - NoPrivacyMapper: true, }, - ) + // Switch the privacy mapper off for simplicity’s sake. + NoPrivacyMapper: true, + } + sessResp, err := litClient.AddAutopilotSession(ctxm, &req) require.NoError(t.t, err) // We now connect to the mailbox from the PoV of the autopilot server. @@ -2525,7 +2532,9 @@ func connectMailboxWithRemoteKey(ctx context.Context, transportConn, err := mailbox.NewGrpcClient( ctx, mailboxServerAddr, connData, - grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})), + grpc.WithTransportCredentials( + credentials.NewTLS(&tls.Config{}), + ), ) if err != nil { return nil, nil, err diff --git a/itest/litd_mode_integrated_test.go b/itest/litd_mode_integrated_test.go index 718092bfc..f6b9a7ca8 100644 --- a/itest/litd_mode_integrated_test.go +++ b/itest/litd_mode_integrated_test.go @@ -817,7 +817,8 @@ func runCertificateCheck(t *testing.T, node *HarnessNode) { require.NoError(t, err) require.Len(t, litCerts, 1) require.Equal( - t, "litd autogenerated cert", litCerts[0].Issuer.Organization[0], + t, "litd autogenerated cert", + litCerts[0].Issuer.Organization[0], ) lndCerts, err := getServerCertificates(node.Cfg.RPCAddr()) @@ -1357,7 +1358,9 @@ func connectMailboxWithPairingPhrase(ctx context.Context, transportConn, err := mailbox.NewGrpcClient( ctx, mailboxServerAddr, connData, - grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})), + grpc.WithTransportCredentials( + credentials.NewTLS(&tls.Config{}), + ), ) if err != nil { return nil, err diff --git a/itest/litd_node.go b/itest/litd_node.go index b01f0cd51..e8bf5b4f4 100644 --- a/itest/litd_node.go +++ b/itest/litd_node.go @@ -637,7 +637,8 @@ func (hn *HarnessNode) InvoiceMacPath() string { return hn.Cfg.InvoiceMacPath } -// renameFile is a helper to rename (log) files created during integration tests. +// renameFile is a helper to rename (log) files created during integration +// tests. func renameFile(fromFileName, toFileName string) { err := os.Rename(fromFileName, toFileName) if err != nil { @@ -807,8 +808,8 @@ func (hn *HarnessNode) Start(litdBinary string, return nil } - // Since Stop uses the LightningClient to stop the node, if we fail to get a - // connected client, we have to kill the process. + // Since Stop uses the LightningClient to stop the node, if we fail to + // get a connected client, we have to kill the process. useMacaroons := !hn.Cfg.HasSeed conn, err := hn.ConnectRPC(useMacaroons) if err != nil { @@ -1273,7 +1274,10 @@ func (hn *HarnessNode) initLightningClient(conn *grpc.ClientConn) error { // Set the harness node's pubkey to what the node claims in GetInfo. // Since the RPC might not be immediately active, we wrap the call in a // wait.NoError. 
- if err := wait.NoError(hn.FetchNodeInfo, lntest.DefaultTimeout); err != nil { + err := wait.NoError( + hn.FetchNodeInfo, lntest.DefaultTimeout, + ) + if err != nil { return err } @@ -1331,7 +1335,8 @@ func (hn *HarnessNode) ReadMacaroon(macPath string, timeout time.Duration) ( err := wait.NoError(func() error { macBytes, err := ioutil.ReadFile(macPath) if err != nil { - return fmt.Errorf("error reading macaroon file: %v", err) + return fmt.Errorf("error reading macaroon file: %v", + err) } newMac := &macaroon.Macaroon{} @@ -1432,7 +1437,8 @@ func (hn *HarnessNode) cleanup() error { if hn.backupDbDir != "" { err := os.RemoveAll(hn.backupDbDir) if err != nil { - return fmt.Errorf("unable to remove backup dir: %v", err) + return fmt.Errorf("unable to remove backup dir: %v", + err) } } @@ -1461,7 +1467,9 @@ func (hn *HarnessNode) Stop() error { return nil // Try again if a recovery/rescan is in progress. - case strings.Contains(err.Error(), "recovery in progress"): + case strings.Contains( + err.Error(), "recovery in progress", + ): return err default: @@ -1506,9 +1514,10 @@ func (hn *HarnessNode) Stop() error { // Close any attempts at further grpc connections. if hn.conn != nil { err := hn.conn.Close() - if err != nil && - !strings.Contains(err.Error(), "connection is closing") { - + isConnClosingErr := strings.Contains( + err.Error(), "connection is closing", + ) + if err != nil && !isConnClosingErr { return fmt.Errorf("error attempting to stop grpc "+ "client: %v", err) } @@ -1803,7 +1812,9 @@ func (hn *HarnessNode) WaitForBlockchainSync(ctx context.Context) error { // WaitForBalance waits until the node sees the expected confirmed/unconfirmed // balance within their wallet. -func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount, confirmed bool) error { +func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount, + confirmed bool) error { + ctx := context.Background() req := &lnrpc.WalletBalanceRequest{} @@ -1815,18 +1826,23 @@ func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount, confirmed } if confirmed { - lastBalance = btcutil.Amount(balance.ConfirmedBalance) - return btcutil.Amount(balance.ConfirmedBalance) == expectedBalance + balanceAmt := btcutil.Amount( + balance.ConfirmedBalance, + ) + lastBalance = balanceAmt + return balanceAmt == expectedBalance } - lastBalance = btcutil.Amount(balance.UnconfirmedBalance) - return btcutil.Amount(balance.UnconfirmedBalance) == expectedBalance + balanceAmt := btcutil.Amount(balance.UnconfirmedBalance) + lastBalance = balanceAmt + return balanceAmt == expectedBalance } err := wait.Predicate(doesBalanceMatch, lntest.DefaultTimeout) if err != nil { return fmt.Errorf("balances not synced after deadline: "+ - "expected %v, only have %v", expectedBalance, lastBalance) + "expected %v, only have %v", expectedBalance, + lastBalance) } return nil diff --git a/itest/network_harness.go b/itest/network_harness.go index 1469c5ec6..347f597c9 100644 --- a/itest/network_harness.go +++ b/itest/network_harness.go @@ -44,8 +44,8 @@ type NetworkHarness struct { // compiled with all required itest flags. litdBinary string - // Miner is a reference to a running full node that can be used to create - // new blocks on the network. + // Miner is a reference to a running full node that can be used to + // create new blocks on the network. 
Miner *miner.HarnessMiner LNDHarness *lntest.HarnessTest @@ -467,9 +467,11 @@ func (n *NetworkHarness) connect(ctx context.Context, tryconnect: if _, err := a.ConnectPeer(ctx, req); err != nil { // If the chain backend is still syncing, retry. - if strings.Contains(err.Error(), lnd.ErrServerNotActive.Error()) || - strings.Contains(err.Error(), "i/o timeout") { - + isNotActiveErr := strings.Contains( + err.Error(), lnd.ErrServerNotActive.Error(), + ) + isTimeoutErr := strings.Contains(err.Error(), "i/o timeout") + if isNotActiveErr || isTimeoutErr { select { case <-time.After(100 * time.Millisecond): goto tryconnect @@ -520,7 +522,9 @@ func (n *NetworkHarness) EnsureConnected(t *testing.T, a, b *HarnessNode) { var predErr error err = wait.Predicate(func() bool { - ctx, cancel := context.WithTimeout(ctx, lntest.DefaultTimeout) + ctx, cancel := context.WithTimeout( + ctx, lntest.DefaultTimeout, + ) defer cancel() err := n.connect(ctx, req, a) @@ -748,7 +752,9 @@ func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error, // Give the node some time to catch up with the chain before we continue // with the tests. - ctxc, done := context.WithTimeout(context.Background(), lntest.DefaultTimeout) + ctxc, done := context.WithTimeout( + context.Background(), lntest.DefaultTimeout, + ) defer done() return node.WaitForBlockchainSync(ctxc) } @@ -941,7 +947,9 @@ func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode, // The cancel is intentionally left out here because the returned // item(open channel client) relies on the context being active. This // will be fixed once we finish refactoring the NetworkHarness. - ctx, _ := context.WithTimeout(ctxb, wait.ChannelOpenTimeout) // nolint: govet + // + // nolint:govet + ctx, _ := context.WithTimeout(ctxb, wait.ChannelOpenTimeout) // Wait until srcNode and destNode have the latest chain synced. // Otherwise, we may run into a check within the funding manager that @@ -982,17 +990,18 @@ func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode, chanOpen := make(chan struct{}) errChan := make(chan error) go func() { - // Consume the "channel pending" update. This waits until the node - // notifies us that the final message in the channel funding workflow - // has been sent to the remote node. + // Consume the "channel pending" update. This waits until the + // node notifies us that the final message in the channel + // funding workflow has been sent to the remote node. resp, err := respStream.Recv() if err != nil { errChan <- err return } - if _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending); !ok { - errChan <- fmt.Errorf("expected channel pending update, "+ - "instead got %v", resp) + _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) + if !ok { + errChan <- fmt.Errorf("expected channel pending "+ + "update, instead got %v", resp) return } @@ -1010,10 +1019,10 @@ func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode, } } -// OpenPendingChannel attempts to open a channel between srcNode and destNode with the -// passed channel funding parameters. If the passed context has a timeout, then -// if the timeout is reached before the channel pending notification is -// received, an error is returned. +// OpenPendingChannel attempts to open a channel between srcNode and destNode +// with the passed channel funding parameters. If the passed context has a +// timeout, then if the timeout is reached before the channel pending +// notification is received, an error is returned. 
func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode, amt btcutil.Amount, pushAmt btcutil.Amount) (*lnrpc.PendingUpdate, error) { @@ -1046,18 +1055,19 @@ func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode, chanPending := make(chan *lnrpc.PendingUpdate) errChan := make(chan error) go func() { - // Consume the "channel pending" update. This waits until the node - // notifies us that the final message in the channel funding workflow - // has been sent to the remote node. + // Consume the "channel pending" update. This waits until the + // node notifies us that the final message in the channel + // funding workflow has been sent to the remote node. resp, err := respStream.Recv() if err != nil { errChan <- err return } - pendingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) + pendingResp, ok := + resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) if !ok { - errChan <- fmt.Errorf("expected channel pending update, "+ - "instead got %v", resp) + errChan <- fmt.Errorf("expected channel pending "+ + "update, instead got %v", resp) return } @@ -1080,7 +1090,8 @@ func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode, // has a timeout, then if the timeout is reached before the channel has been // opened, then an error is returned. func (n *NetworkHarness) WaitForChannelOpen( - openChanStream lnrpc.Lightning_OpenChannelClient) (*lnrpc.ChannelPoint, error) { + openChanStream lnrpc.Lightning_OpenChannelClient) (*lnrpc.ChannelPoint, + error) { ctxb := context.Background() ctx, cancel := context.WithTimeout(ctxb, wait.ChannelOpenTimeout) @@ -1091,10 +1102,12 @@ func (n *NetworkHarness) WaitForChannelOpen( go func() { resp, err := openChanStream.Recv() if err != nil { - errChan <- fmt.Errorf("unable to read rpc resp: %v", err) + errChan <- fmt.Errorf("unable to read rpc resp: %w", + err) return } - fundingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanOpen) + fundingResp, ok := + resp.Update.(*lnrpc.OpenStatusUpdate_ChanOpen) if !ok { errChan <- fmt.Errorf("expected channel open update, "+ "instead got %v", resp) @@ -1120,14 +1133,16 @@ func (n *NetworkHarness) WaitForChannelOpen( // has a timeout, an error is returned if that timeout is reached before the // channel close is pending. func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode, - cp *lnrpc.ChannelPoint, - force bool) (lnrpc.Lightning_CloseChannelClient, *chainhash.Hash, error) { + cp *lnrpc.ChannelPoint, force bool) (lnrpc.Lightning_CloseChannelClient, + *chainhash.Hash, error) { ctxb := context.Background() // The cancel is intentionally left out here because the returned // item(close channel client) relies on the context being active. This // will be fixed once we finish refactoring the NetworkHarness. - ctx, _ := context.WithTimeout(ctxb, wait.ChannelCloseTimeout) // nolint: govet + // + // nolint: govet + ctx, _ := context.WithTimeout(ctxb, wait.ChannelCloseTimeout) // Create a channel outpoint that we can use to compare to channels // from the ListChannelsResponse. @@ -1181,7 +1196,8 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode, } // Next, we'll fetch the target channel in order to get the - // harness node that will be receiving the channel close request. + // harness node that will be receiving the channel close + // request. 
targetChan, err := filterChannel(lnNode, chanPoint) if err != nil { return nil, nil, err @@ -1198,7 +1214,9 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode, return nil, nil, fmt.Errorf("channel of closing " + "node not active in time") } - err = wait.Predicate(activeChanPredicate(receivingNode), timeout) + err = wait.Predicate( + activeChanPredicate(receivingNode), timeout, + ) if err != nil { return nil, nil, fmt.Errorf("channel of receiving " + "node not active in time") @@ -1232,7 +1250,8 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode, return fmt.Errorf("unable to recv() from close "+ "stream: %v", err) } - pendingClose, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ClosePending) + pendingClose, ok := + closeResp.Update.(*lnrpc.CloseStatusUpdate_ClosePending) if !ok { return fmt.Errorf("expected channel close update, "+ "instead got %v", pendingClose) @@ -1317,7 +1336,8 @@ func (n *NetworkHarness) AssertChannelExists(node *HarnessNode, return wait.NoError(func() error { resp, err := node.ListChannels(ctx, req) if err != nil { - return fmt.Errorf("unable fetch node's channels: %v", err) + return fmt.Errorf("unable fetch node's channels: %w", + err) } for _, channel := range resp.Channels { @@ -1498,7 +1518,8 @@ func (n *NetworkHarness) sendCoins(amt btcutil.Amount, target *HarnessNode, // the target node's unconfirmed balance reflects the expected balance // and exit. if !confirmed { - expectedBalance := btcutil.Amount(initialBalance.UnconfirmedBalance) + amt + expectedBalance := + btcutil.Amount(initialBalance.UnconfirmedBalance) + amt return target.WaitForBalance(expectedBalance, false) } diff --git a/itest/test_harness.go b/itest/test_harness.go index 837bef141..77a43ccee 100644 --- a/itest/test_harness.go +++ b/itest/test_harness.go @@ -228,7 +228,9 @@ func mineBlocksSlow(t *harnessTest, net *NetworkHarness, return blocks } -func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, txid *chainhash.Hash) { +func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, + txid *chainhash.Hash) { + for _, tx := range block.Transactions { sha := tx.TxHash() if bytes.Equal(txid[:], sha[:]) { diff --git a/rpcmiddleware/config.go b/rpcmiddleware/config.go index c1aecc82d..d8ae3fc00 100644 --- a/rpcmiddleware/config.go +++ b/rpcmiddleware/config.go @@ -10,7 +10,7 @@ const ( // Config is the configuration struct for the RPC middleware. 
// -//nolint:lll +//nolint:ll type Config struct { Disabled bool `long:"disabled" description:"Disable the RPC middleware"` InterceptTimeout time.Duration `long:"intercept-timeout" description:"The maximum time the RPC middleware is allowed to take for intercepting each RPC request"` diff --git a/rpcmiddleware/interface.go b/rpcmiddleware/interface.go index 159d7d8a2..25bc02bae 100644 --- a/rpcmiddleware/interface.go +++ b/rpcmiddleware/interface.go @@ -431,8 +431,8 @@ func validateRequestCheckHandler(typedHandlerType reflect.Type, "with a sub type of context.Context") } if !typedHandlerType.In(1).ConvertibleTo(requestType) { - return fmt.Errorf("request handler must have second parameter " + - "with a sub type of proto.Message") + return fmt.Errorf("request handler must have second " + + "parameter with a sub type of proto.Message") } if typedHandlerType.Out(0) != errorType { return fmt.Errorf("request handler must return exactly one " + @@ -459,8 +459,8 @@ func validateMessageHandler(typedHandlerType reflect.Type, "with a sub type of context.Context") } if !typedHandlerType.In(1).ConvertibleTo(messageType) { - return fmt.Errorf("message handler must have second parameter " + - "with a sub type of proto.Message") + return fmt.Errorf("message handler must have second " + + "parameter with a sub type of proto.Message") } outType0 := typedHandlerType.Out(0) pmt := protoMessageType diff --git a/rules/channel_restrictions.go b/rules/channel_restrictions.go index c6cb134d8..52c976799 100644 --- a/rules/channel_restrictions.go +++ b/rules/channel_restrictions.go @@ -212,8 +212,8 @@ func (c *ChannelRestrictEnforcer) HandleRequest(ctx context.Context, uri string, // rpcmiddleware.RoundTripCheckers. // // NOTE: this is part of the Enforcer interface. -func (c *ChannelRestrictEnforcer) HandleResponse(ctx context.Context, uri string, - msg proto.Message) (proto.Message, error) { +func (c *ChannelRestrictEnforcer) HandleResponse(ctx context.Context, + uri string, msg proto.Message) (proto.Message, error) { checkers := c.checkers() if checkers == nil { diff --git a/rules/history_limit.go b/rules/history_limit.go index be2894f42..1ef44a56f 100644 --- a/rules/history_limit.go +++ b/rules/history_limit.go @@ -240,8 +240,8 @@ func (h *HistoryLimit) ToProto() *litrpc.RuleValue { } } -// GetStartDate is a helper function that determines the start date of the values -// given if a start date is set or a max duration is given. +// GetStartDate is a helper function that determines the start date of the +// values given if a start date is set or a max duration is given. func (h *HistoryLimit) GetStartDate() time.Time { startDate := h.StartDate if h.StartDate.IsZero() { diff --git a/rules/history_limit_test.go b/rules/history_limit_test.go index eb1dacc52..b3db5ad9c 100644 --- a/rules/history_limit_test.go +++ b/rules/history_limit_test.go @@ -76,7 +76,8 @@ func TestHistoryLimitCheckers(t *testing.T) { require.NoError(t, err) // The ForwardingHistory request has a StartTime parameter. The request - // should be allowed if the parameter is ok given the HistoryLimit values. + // should be allowed if the parameter is ok given the HistoryLimit + // values. _, err = values.HandleRequest( ctx, "/lnrpc.Lightning/ForwardingHistory", &lnrpc.ForwardingHistoryRequest{ @@ -87,7 +88,8 @@ func TestHistoryLimitCheckers(t *testing.T) { // And it should be denied if it violates the values. // The ForwardingHistory request has a StartTime parameter. 
The request - // should be allowed if the parameter is ok given the HistoryLimit values. + // should be allowed if the parameter is ok given the HistoryLimit + // values. _, err = values.HandleRequest( ctx, "/lnrpc.Lightning/ForwardingHistory", &lnrpc.ForwardingHistoryRequest{ @@ -99,9 +101,9 @@ func TestHistoryLimitCheckers(t *testing.T) { require.Error(t, err) // The ListInvoices function does not have a StartTime parameter and - // so the HistoryLimit values needs to alter the _response_ of this query - // instead to only include the invoices created after the HistoryLimit - // start date. + // so the HistoryLimit values needs to alter the _response_ of this + // query instead to only include the invoices created after the + // HistoryLimit start date. invoices := []*lnrpc.Invoice{ {CreationDate: time.Now().Unix()}, {CreationDate: time.Now().Add(-time.Hour * 5).Unix()}, diff --git a/rules/onchain_budget.go b/rules/onchain_budget.go index 783e3a664..1a6160e31 100644 --- a/rules/onchain_budget.go +++ b/rules/onchain_budget.go @@ -255,7 +255,8 @@ func (o *OnChainBudgetEnforcer) checkers() map[string]mid.RoundTripChecker { "/lnrpc.Lightning/ListChannels": mid.NewResponseRewriter( &lnrpc.ListChannelsRequest{}, &lnrpc.ListChannelsResponse{}, - func(ctx context.Context, r *lnrpc.ListChannelsResponse) ( + func(ctx context.Context, + r *lnrpc.ListChannelsResponse) ( proto.Message, error) { // We remove any potentially added memos for @@ -271,7 +272,8 @@ func (o *OnChainBudgetEnforcer) checkers() map[string]mid.RoundTripChecker { "/lnrpc.Lightning/PendingChannels": mid.NewResponseRewriter( &lnrpc.PendingChannelsRequest{}, &lnrpc.PendingChannelsResponse{}, - func(ctx context.Context, r *lnrpc.PendingChannelsResponse) ( + func(ctx context.Context, + r *lnrpc.PendingChannelsResponse) ( proto.Message, error) { // We remove any potentially added memos for @@ -288,6 +290,7 @@ func (o *OnChainBudgetEnforcer) checkers() map[string]mid.RoundTripChecker { ) } + // nolint:ll for _, c := range r.PendingForceClosingChannels { c.Channel.Memo = removeReqId( c.Channel.Memo, diff --git a/rules/onchain_budget_test.go b/rules/onchain_budget_test.go index 67f308999..24e763967 100644 --- a/rules/onchain_budget_test.go +++ b/rules/onchain_budget_test.go @@ -380,10 +380,12 @@ func TestHandleMemoResponse(t *testing.T) { }, ) require.NoError(t, err) + // nolint:ll for _, channel := range response.(*lnrpc.ListChannelsResponse).Channels { assertMemoInvariant(channel.Memo) } + // nolint:ll response, err = enf.HandleResponse( ctx, "/lnrpc.Lightning/PendingChannels", &lnrpc.PendingChannelsResponse{ diff --git a/rules/rate_limit_test.go b/rules/rate_limit_test.go index 1f291d291..c291bb4e6 100644 --- a/rules/rate_limit_test.go +++ b/rules/rate_limit_test.go @@ -180,8 +180,8 @@ func TestRateLimitCheckRequest(t *testing.T) { // Now we add a more recent write action to the DB. db.addAction("write-uri", time.Now()) - // Since the rate limit values only allows one write action per 24 hours, - // a request for another write action should not be allowed. + // Since the rate limit values only allows one write action per 24 + // hours, a request for another write action should not be allowed. _, err = enf.HandleRequest(ctx, "write-uri", nil) require.Error(t, err) @@ -227,8 +227,8 @@ func (m *mockRateLimitCfg) GetMethodPerms() func(string) ([]bakery.Op, bool) { } } -// mockActionsDB is used to mock the action's db backend used by the RateLimitMgr -// values. 
+// mockActionsDB is used to mock the action's db backend used by the +// RateLimitMgr values. type mockActionsDB struct { actions []*firewalldb.RuleAction } diff --git a/session/kvdb_store.go b/session/kvdb_store.go index da58ebf75..a073bf952 100644 --- a/session/kvdb_store.go +++ b/session/kvdb_store.go @@ -22,6 +22,8 @@ var ( // information about sessions. These sessions are indexed by their // public key. // + // nolint:ll + // // The session bucket has the following structure: // session -> -> // -> id-index -> -> key -> diff --git a/session/migration1/tlv.go b/session/migration1/tlv.go index 54105a87d..72a19580f 100644 --- a/session/migration1/tlv.go +++ b/session/migration1/tlv.go @@ -219,9 +219,8 @@ func DeserializeSession(r io.Reader) (*Session, error) { } if t, ok := parsedTypes[typeLocalPrivateKey]; ok && t == nil { - session.LocalPrivateKey, session.LocalPublicKey = btcec.PrivKeyFromBytes( - privateKey, - ) + session.LocalPrivateKey, session.LocalPublicKey = + btcec.PrivKeyFromBytes(privateKey) } if t, ok := parsedTypes[typeFeaturesConfig]; ok && t == nil { diff --git a/session/migration2/id_to_group_index.go b/session/migration2/id_to_group_index.go index 382a352fd..f1e55ce22 100644 --- a/session/migration2/id_to_group_index.go +++ b/session/migration2/id_to_group_index.go @@ -13,6 +13,8 @@ var ( // information about sessions. These sessions are indexed by their // public key. // + // nolint:ll + // // The session bucket has the following structure: // session -> -> // -> id-index -> -> key -> @@ -54,7 +56,8 @@ var ( ) // MigrateSessionIDToGroupIndex back-fills the session ID to group index so that -// it has an entry for all sessions that the session store is currently aware of. +// it has an entry for all sessions that the session store is currently aware +// of. func MigrateSessionIDToGroupIndex(tx *bbolt.Tx) error { sessionBucket := tx.Bucket(sessionBucketKey) if sessionBucket == nil { diff --git a/session/migtest/raw_db.go b/session/migtest/raw_db.go index 7227e41bb..efb008819 100644 --- a/session/migtest/raw_db.go +++ b/session/migtest/raw_db.go @@ -58,7 +58,9 @@ func dumpBucket(bucket *bbolt.Bucket) error { } // RestoreDB primes the database with the given data set. -func RestoreDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error { +func RestoreDB(tx *bbolt.Tx, rootKey []byte, + data map[string]interface{}) error { + bucket, err := tx.CreateBucket(rootKey) if err != nil { return err diff --git a/session/sql_migration_test.go b/session/sql_migration_test.go index fb8763808..0e79b3ba9 100644 --- a/session/sql_migration_test.go +++ b/session/sql_migration_test.go @@ -968,7 +968,8 @@ func randomAccountOptions(ctx context.Context, t *testing.T, func randomBytes(n int) []byte { b := make([]byte, n) for i := range b { - b[i] = byte(rand.Intn(256)) // Random int between 0-255, then cast to byte + // Random int between 0-255, then cast to byte. + b[i] = byte(rand.Intn(256)) } return b } diff --git a/session/sql_store.go b/session/sql_store.go index a82fd0e63..a9c53fd6e 100644 --- a/session/sql_store.go +++ b/session/sql_store.go @@ -20,6 +20,8 @@ import ( // SQLQueries is a subset of the sqlc.Queries interface that can be used to // interact with session related tables. 
+// +// nolint:ll type SQLQueries interface { GetAliasBySessionID(ctx context.Context, id int64) ([]byte, error) GetSessionByID(ctx context.Context, id int64) (sqlc.Session, error) @@ -140,7 +142,8 @@ func (s *SQLStore) NewSession(ctx context.Context, label string, typ Type, } }) if err != nil { - return fmt.Errorf("unable to convert account ID: %w", err) + return fmt.Errorf("unable to convert account ID: %w", + err) } localKey := sess.LocalPublicKey.SerializeCompressed() @@ -216,6 +219,7 @@ func (s *SQLStore) NewSession(ctx context.Context, label string, typ Type, // Write mac perms and caveats. if sess.MacaroonRecipe != nil { for _, perm := range sess.MacaroonRecipe.Permissions { + // nolint:ll err := db.InsertSessionMacaroonPermission( ctx, sqlc.InsertSessionMacaroonPermissionParams{ SessionID: dbID, @@ -230,6 +234,7 @@ func (s *SQLStore) NewSession(ctx context.Context, label string, typ Type, } for _, caveat := range sess.MacaroonRecipe.Caveats { + // nolint:ll err := db.InsertSessionMacaroonCaveat( ctx, sqlc.InsertSessionMacaroonCaveatParams{ SessionID: dbID, @@ -253,6 +258,7 @@ func (s *SQLStore) NewSession(ctx context.Context, label string, typ Type, // Write feature configs. if sess.FeatureConfig != nil { for featureName, config := range *sess.FeatureConfig { + // nolint:ll err := db.InsertSessionFeatureConfig( ctx, sqlc.InsertSessionFeatureConfigParams{ SessionID: dbID, @@ -606,7 +612,8 @@ func (s *SQLStore) GetSession(ctx context.Context, alias ID) (*Session, error) { return sess, err } -// GetGroupID will return the legacy group Alias for the given legacy session Alias. +// GetGroupID will return the legacy group Alias for the given legacy session +// Alias. // // NOTE: This is part of the AliasToGroupIndex interface. func (s *SQLStore) GetGroupID(ctx context.Context, sessionID ID) (ID, error) { @@ -728,7 +735,8 @@ func unmarshalSession(ctx context.Context, db SQLQueries, accountAlias, err := accounts.AccountIDFromInt64(account.Alias) if err != nil { - return nil, fmt.Errorf("unable to get account ID: %v", err) + return nil, fmt.Errorf("unable to get account ID: %v", + err) } acctAlias = fn.Some(accountAlias) } @@ -837,7 +845,9 @@ func unmarshalMacPerms(dbPerms []sqlc.SessionMacaroonPermission) []bakery.Op { return ops } -func unmarshalMacCaveats(dbCaveats []sqlc.SessionMacaroonCaveat) []macaroon.Caveat { +func unmarshalMacCaveats( + dbCaveats []sqlc.SessionMacaroonCaveat) []macaroon.Caveat { + caveats := make([]macaroon.Caveat, len(dbCaveats)) for i, dbCaveat := range dbCaveats { caveats[i] = macaroon.Caveat{ @@ -850,7 +860,9 @@ func unmarshalMacCaveats(dbCaveats []sqlc.SessionMacaroonCaveat) []macaroon.Cave return caveats } -func unmarshalFeatureConfigs(dbConfigs []sqlc.SessionFeatureConfig) *FeaturesConfig { +func unmarshalFeatureConfigs( + dbConfigs []sqlc.SessionFeatureConfig) *FeaturesConfig { + configs := make(FeaturesConfig, len(dbConfigs)) for _, dbConfig := range dbConfigs { configs[dbConfig.FeatureName] = dbConfig.Config diff --git a/session/tlv.go b/session/tlv.go index d8817c1cd..cb799c28d 100644 --- a/session/tlv.go +++ b/session/tlv.go @@ -261,9 +261,8 @@ func DeserializeSession(r io.Reader) (*Session, error) { } if t, ok := parsedTypes[typeLocalPrivateKey]; ok && t == nil { - session.LocalPrivateKey, session.LocalPublicKey = btcec.PrivKeyFromBytes( - privateKey, - ) + session.LocalPrivateKey, session.LocalPublicKey = + btcec.PrivKeyFromBytes(privateKey) } if t, ok := parsedTypes[typeFeaturesConfig]; ok && t == nil { diff --git a/session_rpcserver.go 
b/session_rpcserver.go index 01e1be9d9..083852ee4 100644 --- a/session_rpcserver.go +++ b/session_rpcserver.go @@ -240,7 +240,8 @@ func (s *sessionRpcServer) start(ctx context.Context, if perm { err := s.cfg.db.ShiftState( - ctx, sess.ID, session.StateRevoked, + ctx, sess.ID, + session.StateRevoked, ) if err != nil { log.Errorf("error revoking "+ @@ -383,9 +384,9 @@ func (s *sessionRpcServer) AddSession(ctx context.Context, // No other types are currently supported. default: return nil, fmt.Errorf("invalid session type, only admin, " + - "readonly, custom and account macaroon types supported in " + - "LiT. Autopilot sessions must be added using " + - "AddAutoPilotSession method") + "readonly, custom and account macaroon types " + + "supported in LiT. Autopilot sessions must be added " + + "using AddAutoPilotSession method") } // Collect the de-duped permissions. @@ -651,7 +652,8 @@ func (s *sessionRpcServer) ListSessions(ctx context.Context, // RevokeSession revokes a single session and also stops it if it is currently // active. func (s *sessionRpcServer) RevokeSession(ctx context.Context, - req *litrpc.RevokeSessionRequest) (*litrpc.RevokeSessionResponse, error) { + req *litrpc.RevokeSessionRequest) (*litrpc.RevokeSessionResponse, + error) { pubKey, err := btcec.ParsePubKey(req.LocalPublicKey) if err != nil { @@ -1576,6 +1578,7 @@ func (s *sessionRpcServer) marshalRPCSession(ctx context.Context, accountID = hex.EncodeToString(id[:]) }) + // nolint:ll return &litrpc.Session{ Id: sess.ID[:], Label: sess.Label, diff --git a/subservers/config.go b/subservers/config.go index c4bddfeed..d399e9067 100644 --- a/subservers/config.go +++ b/subservers/config.go @@ -5,7 +5,7 @@ import "github.com/lightningnetwork/lnd/build" // RemoteConfig holds the configuration parameters that are needed when running // LiT in the "remote" lnd mode. // -//nolint:lll +//nolint:ll type RemoteConfig struct { LitLogDir string `long:"lit-logdir" description:"For lnd remote mode only: Directory to log output."` LitMaxLogFiles int `long:"lit-maxlogfiles" description:"For lnd remote mode only: Maximum logfiles to keep (0 for no rotation). DEPRECATED: use --logging.file.max-files instead" hidden:"true"` @@ -25,7 +25,7 @@ type RemoteConfig struct { // RemoteDaemonConfig holds the configuration parameters that are needed to // connect to a remote daemon like lnd for example. // -//nolint:lll +//nolint:ll type RemoteDaemonConfig struct { // RPCServer is host:port that the remote daemon's RPC server is // listening on. 
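Note on the //nolint:lll -> //nolint:ll renames on these config structs: the exemption is kept because go-flags style declarations put the whole long/description tag on one line, and a struct tag is a single string literal read via reflection, so it cannot be wrapped to 80 columns. A minimal sketch of the exempted pattern, with hypothetical field names rather than code from this repository:

package config

// Sketch only: the description tag alone pushes the declaration far past 80
// characters and cannot be split across lines, so the whole block carries the
// custom linter's directive instead.
//
//nolint:ll
type remoteDaemonExample struct {
	RPCServer    string `long:"rpcserver" description:"host:port that the remote daemon's RPC server is listening on"`
	MacaroonPath string `long:"macaroonpath" description:"path to the macaroon file used to authenticate against the remote daemon"`
}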
diff --git a/terminal.go b/terminal.go index 992bcaf7b..498be4697 100644 --- a/terminal.go +++ b/terminal.go @@ -534,6 +534,7 @@ func (g *LightningTerminal) start(ctx context.Context) error { auxComponents = *components } + // nolint:ll implCfg := &lnd.ImplementationCfg{ GrpcRegistrar: g, RestRegistrar: g, @@ -1074,7 +1075,8 @@ func (g *LightningTerminal) startInternalSubServers(ctx context.Context, db: g.stores.sessions, basicAuth: g.rpcProxy.basicAuth, grpcOptions: []grpc.ServerOption{ - grpc.CustomCodec(grpcProxy.Codec()), // nolint: staticcheck, + // nolint:staticcheck, + grpc.CustomCodec(grpcProxy.Codec()), grpc.ChainStreamInterceptor( g.rpcProxy.StreamServerInterceptor, ), @@ -2060,6 +2062,7 @@ func (g *LightningTerminal) showStartupInfo(ctx context.Context) error { webInterfaceString = "disabled" } + // nolint:ll str := "" + "----------------------------------------------------------\n" + " Lightning Terminal (LiT) by Lightning Labs \n" + diff --git a/tools/.custom-gcl.yml b/tools/.custom-gcl.yml new file mode 100644 index 000000000..38332c00a --- /dev/null +++ b/tools/.custom-gcl.yml @@ -0,0 +1,4 @@ +version: v1.64.6 +plugins: + - module: 'github.com/lightninglabs/lightning-terminal/tools/linters' + path: ./linters diff --git a/tools/Dockerfile b/tools/Dockerfile index f4a20c3b5..f5eb90436 100644 --- a/tools/Dockerfile +++ b/tools/Dockerfile @@ -11,7 +11,9 @@ RUN cd /tmp \ && mkdir -p /tmp/build/.cache \ && mkdir -p /tmp/build/.modcache \ && cd /tmp/tools \ - && go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint \ + && CGO_ENABLED=0 go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint \ + && CGO_ENABLED=0 golangci-lint custom \ + && mv ./custom-gcl /usr/local/bin/custom-gcl \ && chmod -R 777 /tmp/build/ WORKDIR /build diff --git a/tools/linters/go.mod b/tools/linters/go.mod new file mode 100644 index 000000000..e73a11500 --- /dev/null +++ b/tools/linters/go.mod @@ -0,0 +1,15 @@ +module github.com/lightninglabs/lightning-terminal/tools/linters + +go 1.24.9 + +require ( + github.com/golangci/plugin-module-register v0.1.1 + github.com/stretchr/testify v1.10.0 + golang.org/x/tools v0.30.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tools/linters/go.sum b/tools/linters/go.sum new file mode 100644 index 000000000..1d2bc3d25 --- /dev/null +++ b/tools/linters/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= +github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/tools v0.30.0 
h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools/linters/ll.go b/tools/linters/ll.go new file mode 100644 index 000000000..7170447de --- /dev/null +++ b/tools/linters/ll.go @@ -0,0 +1,266 @@ +// The following code is based on code from GolangCI. +// Source: https://github.com/golangci-lint/pkg/golinters/lll/lll.go +// License: GNU + +package linters + +import ( + "bufio" + "errors" + "fmt" + "go/ast" + "go/token" + "os" + "path/filepath" + "regexp" + "strings" + "unicode/utf8" + + "github.com/golangci/plugin-module-register/register" + "golang.org/x/tools/go/analysis" +) + +const ( + linterName = "ll" + goCommentDirectivePrefix = "//go:" + + defaultMaxLineLen = 80 + defaultTabWidthInSpaces = 8 + defaultLogRegex = `^\s*.*(L|l)og\.` +) + +// LLConfig is the configuration for the ll linter. +type LLConfig struct { + LineLength int `json:"line-length"` + TabWidth int `json:"tab-width"` + LogRegex string `json:"log-regex"` +} + +// New creates a new LLPlugin from the given settings. It satisfies the +// signature required by the golangci-lint linter for plugins. +func New(settings any) (register.LinterPlugin, error) { + cfg, err := register.DecodeSettings[LLConfig](settings) + if err != nil { + return nil, err + } + + // Fill in default config values if they are not set. + if cfg.LineLength == 0 { + cfg.LineLength = defaultMaxLineLen + } + if cfg.TabWidth == 0 { + cfg.TabWidth = defaultTabWidthInSpaces + } + if cfg.LogRegex == "" { + cfg.LogRegex = defaultLogRegex + } + + return &LLPlugin{cfg: cfg}, nil +} + +// LLPlugin is a golangci-linter plugin that can be used to check that code line +// lengths do not exceed a certain limit. +type LLPlugin struct { + cfg LLConfig +} + +// BuildAnalyzers creates the analyzers for the ll linter. +// +// NOTE: This is part of the register.LinterPlugin interface. +func (l *LLPlugin) BuildAnalyzers() ([]*analysis.Analyzer, error) { + return []*analysis.Analyzer{ + { + Name: linterName, + Doc: "Reports long lines", + Run: l.run, + }, + }, nil +} + +// GetLoadMode returns the load mode for the ll linter. +// +// NOTE: This is part of the register.LinterPlugin interface. 
+func (l *LLPlugin) GetLoadMode() string { + return register.LoadModeSyntax +} + +func (l *LLPlugin) run(pass *analysis.Pass) (any, error) { + var ( + spaces = strings.Repeat(" ", l.cfg.TabWidth) + logRegex = regexp.MustCompile(l.cfg.LogRegex) + ) + + for _, f := range pass.Files { + fileName := getFileName(pass, f) + + issues, err := getLLLIssuesForFile( + fileName, l.cfg.LineLength, spaces, logRegex, + ) + if err != nil { + return nil, err + } + + file := pass.Fset.File(f.Pos()) + for _, issue := range issues { + pos := file.LineStart(issue.pos.Line) + + pass.Report(analysis.Diagnostic{ + Pos: pos, + End: 0, + Category: linterName, + Message: issue.text, + }) + } + + } + + return nil, nil +} + +type issue struct { + pos token.Position + text string +} + +func getLLLIssuesForFile(filename string, maxLineLen int, + tabSpaces string, logRegex *regexp.Regexp) ([]*issue, error) { + + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("can't open file %s: %w", filename, err) + } + defer f.Close() + + var ( + res []*issue + lineNumber int + multiImportEnabled bool + multiLinedLog bool + ) + + // Scan over each line. + scanner := bufio.NewScanner(f) + for scanner.Scan() { + lineNumber++ + + // Replace all tabs with spaces. + line := scanner.Text() + line = strings.ReplaceAll(line, "\t", tabSpaces) + + // Ignore any //go: directives since these cant be wrapped onto + // a new line. + if strings.HasPrefix(line, goCommentDirectivePrefix) { + continue + } + + // We never want the linter to run on imports since these cannot + // be wrapped onto a new line. If this is a single line import + // we can skip the line entirely. If this is a multi-line import + // skip until the closing bracket. + // + // NOTE: We trim the line space around the line here purely for + // the purpose of being able to test this part of the linter + // without the risk of the `gosimports` tool reformatting the + // test case and removing the import. + if strings.HasPrefix(strings.TrimSpace(line), "import") { + multiImportEnabled = strings.HasSuffix(line, "(") + continue + } + + // If we have marked the start of a multi-line import, we should + // skip until the closing bracket of the import block. + if multiImportEnabled { + if line == ")" { + multiImportEnabled = false + } + + continue + } + + // Check if the line matches the log pattern. + if logRegex.MatchString(line) { + multiLinedLog = !strings.HasSuffix(line, ")") + continue + } + + if multiLinedLog { + // Check for the end of a multiline log call. + if strings.HasSuffix(line, ")") { + multiLinedLog = false + } + + continue + } + + // Otherwise, we can check the length of the line and report if + // it exceeds the maximum line length. + lineLen := utf8.RuneCountInString(line) + if lineLen > maxLineLen { + res = append(res, &issue{ + pos: token.Position{ + Filename: filename, + Line: lineNumber, + }, + text: fmt.Sprintf("the line is %d "+ + "characters long, which exceeds the "+ + "maximum of %d characters.", lineLen, + maxLineLen), + }) + } + } + + if err := scanner.Err(); err != nil { + if errors.Is(err, bufio.ErrTooLong) && + maxLineLen < bufio.MaxScanTokenSize { + + // scanner.Scan() might fail if the line is longer than + // bufio.MaxScanTokenSize. In the case where the + // specified maxLineLen is smaller than + // bufio.MaxScanTokenSize we can return this line as a + // long line instead of returning an error. The reason + // for this change is that this case might happen with + // autogenerated files. 
The go-bindata tool for instance + // might generate a file with a very long line. In this + // case, as it's an auto generated file, the warning + // returned by lll will be ignored. + // But if we return a linter error here, and this error + // happens for an autogenerated file the error will be + // discarded (fine), but all the subsequent errors for + // lll will be discarded for other files, and we'll miss + // legit error. + res = append(res, &issue{ + pos: token.Position{ + Filename: filename, + Line: lineNumber, + Column: 1, + }, + text: fmt.Sprintf("line is more than "+ + "%d characters", + bufio.MaxScanTokenSize), + }) + } else { + return nil, fmt.Errorf("can't scan file %s: %w", + filename, err) + } + } + + return res, nil +} + +func getFileName(pass *analysis.Pass, file *ast.File) string { + fileName := pass.Fset.PositionFor(file.Pos(), true).Filename + ext := filepath.Ext(fileName) + if ext != "" && ext != ".go" { + // The position has been adjusted to a non-go file, + // revert to original file. + position := pass.Fset.PositionFor(file.Pos(), false) + fileName = position.Filename + } + + return fileName +} + +func init() { + // Register the linter with the plugin module register. + register.Plugin(linterName, New) +} diff --git a/tools/linters/ll_test.go b/tools/linters/ll_test.go new file mode 100644 index 000000000..1ab4ae352 --- /dev/null +++ b/tools/linters/ll_test.go @@ -0,0 +1,155 @@ +package linters + +import ( + "os" + "regexp" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// TestGetLLLIssuesForFile tests the line-too-long linter. +// +//nolint:ll +func TestGetLLLIssuesForFile(t *testing.T) { + // Test data + testCases := []struct { + name string + content string + logRegex string + expectedIssue []string + }{ + { + name: "Single long line", + content: ` + fmt.Println("This is a very long line that exceeds the maximum length and should be flagged by the linter.")`, + logRegex: defaultLogRegex, + expectedIssue: []string{ + "the line is 140 characters long, which " + + "exceeds the maximum of 80 characters.", + }, + }, + { + name: "Multiple long lines", + content: ` + fmt.Println("This is a very long line that exceeds the maximum length and should be flagged by the linter.") + fmt.Println("This is a another very long line that exceeds the maximum length and should be flagged by the linter.")`, + logRegex: defaultLogRegex, + expectedIssue: []string{ + "the line is 140 characters long, which " + + "exceeds the maximum of 80 characters.", + "the line is 148 characters long, which " + + "exceeds the maximum of 80 characters.", + }, + }, + { + name: "Short lines", + logRegex: defaultLogRegex, + content: ` + fmt.Println("Short line")`, + }, + { + name: "Directive ignored", + logRegex: defaultLogRegex, + content: `//go:generate something very very very very very very very very very long and complex here wowowow`, + }, + { + name: "Long single line import", + logRegex: defaultLogRegex, + content: `import "github.com/lightningnetwork/lnd/lnrpc/walletrpc/more/more/more/more/more/more/ok/that/is/enough"`, + }, + { + name: "Multi-line import", + logRegex: defaultLogRegex, + content: ` + import ( + "os" + "fmt" + "github.com/lightningnetwork/lnd/lnrpc/walletrpc/more/ok/that/is/enough" + )`, + }, + { + name: "Long single line log", + logRegex: defaultLogRegex, + content: ` + log.Infof("This is a very long log line but since it is a log line, it should be skipped by the linter."), + rpcLog.Info("Another long 
log line with a slightly different name and should still be skipped")`, + }, + { + name: "Long single line log followed by a non-log line", + logRegex: defaultLogRegex, + content: ` + log.Infof("This is a very long log line but since it is a log line, it should be skipped by the linter.") + fmt.Println("This is a very long line that exceeds the maximum length and should be flagged by the linter.")`, + expectedIssue: []string{ + "the line is 140 characters long, which " + + "exceeds the maximum of 80 characters.", + }, + }, + { + name: "Multi-line log", + logRegex: defaultLogRegex, + content: ` + log.Infof("This is a very long log line but + since it is a log line, it + should be skipped by the linter.")`, + }, + { + name: "Multi-line log followed by a non-log line", + logRegex: defaultLogRegex, + content: ` + log.Infof("This is a very long log line but + since it is a log line, it + should be skipped by the linter.") + fmt.Println("This is a very long line that + exceeds the maximum length and + should be flagged by the linter.")`, + expectedIssue: []string{ + "the line is 82 characters long, which " + + "exceeds the maximum of 80 characters.", + }, + }, + { + name: "Only skip 'S' logs", + logRegex: `^\s*.*(L|l)og\.(Info|Debug|Trace|Warn|Error|Critical)S\(`, + content: ` + log.Infof("A long log line but it is not an S log and so should be caught") + log.InfoS("This is a very long log line but + since it is an 'S' log line, it + should be skipped by the linter.") + log.TraceS("Another S log that should be skipped by the linter")`, + expectedIssue: []string{ + "the line is 107 characters long, which " + + "exceeds the maximum of 80 characters.", + }, + }, + } + + tabSpaces := strings.Repeat(" ", defaultTabWidthInSpaces) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + logRegex := regexp.MustCompile(tc.logRegex) + + // Write content to a temporary file. + tmpFile := t.TempDir() + "/test.go" + err := os.WriteFile(tmpFile, []byte(tc.content), 0644) + require.NoError(t, err) + + // Run the linter on the file. + issues, err := getLLLIssuesForFile( + tmpFile, defaultMaxLineLen, tabSpaces, logRegex, + ) + require.NoError(t, err) + + require.Len(t, issues, len(tc.expectedIssue)) + + for i, issue := range issues { + require.Equal( + t, tc.expectedIssue[i], issue.text, + ) + } + }) + } +} diff --git a/version.go b/version.go index 7004a1fa5..c868e7289 100644 --- a/version.go +++ b/version.go @@ -27,7 +27,8 @@ var CommitHash string var Dirty string // semanticAlphabet -const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-." +const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn" + + "opqrstuvwxyz-." // These constants define the application version and follow the semantic // versioning 2.0.0 spec (http://semver.org/).
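To make the new behaviour concrete: before the length check, getLLLIssuesForFile matches each line against the configured log regex, so S-suffixed log calls (the pattern exercised in the "Only skip 'S' logs" test case above) are exempt however long they are. A small, self-contained sketch of which lines that pattern exempts; the log lines are illustrative only, not taken from the codebase:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The S-log pattern from the linter settings: only structured,
	// S-suffixed log calls are exempt from the 80 character limit.
	re := regexp.MustCompile(
		`^\s*.*(L|l)og\.(Info|Debug|Trace|Warn|Error|Critical)S\(`,
	)

	// Matches: the line is skipped regardless of its length.
	fmt.Println(re.MatchString(`log.InfoS(ctx, "channel opened", "peer", peer)`))

	// Matches: prefixed loggers such as rpcLog are covered by the pattern.
	fmt.Println(re.MatchString(`rpcLog.TraceS("some long structured log line")`))

	// No match: a plain formatted log call is still length-checked and is
	// reported once it exceeds the configured limit.
	fmt.Println(re.MatchString(`log.Infof("some long unstructured log line")`))
}

A matching call that does not close on the same line marks the start of a multi-line log statement, which is skipped until its closing parenthesis; import blocks and //go: directives are likewise never length-checked, mirroring the remaining ll_test.go cases.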