From ddfffcbea4b0709cf4563b3ed9ac9060324c7ec3 Mon Sep 17 00:00:00 2001 From: Pi-Cla Date: Tue, 7 May 2024 13:31:56 -0600 Subject: [PATCH] Apply go lints --- processors/agencyduplicateremover.go | 9 ++++----- processors/frequencyminizer.go | 7 ++++--- processors/idminimizer.go | 13 +++++++------ processors/routeduplicateremover.go | 9 ++++----- processors/servicecaldatesremover.go | 7 ++++--- processors/serviceduplicateremover.go | 13 +++++++------ processors/serviceminimizer.go | 9 +++++---- processors/servicenonoverlapper.go | 19 ++++++++++--------- processors/shapeduplicateremover.go | 9 +++++---- processors/shapeidx.go | 5 +++-- processors/shapesnapper.go | 8 +++++--- processors/tripduplicateremover.go | 21 +++++++-------------- 12 files changed, 65 insertions(+), 64 deletions(-) diff --git a/processors/agencyduplicateremover.go b/processors/agencyduplicateremover.go index 93e66d0..f3afcf3 100755 --- a/processors/agencyduplicateremover.go +++ b/processors/agencyduplicateremover.go @@ -8,10 +8,11 @@ package processors import ( "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "hash/fnv" "os" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // AgencyDuplicateRemover merges semantically equivalent routes @@ -116,9 +117,7 @@ func (adr *AgencyDuplicateRemover) combineAgencies(feed *gtfsparser.Feed, agenci } } - for _, attr := range a.Attributions { - ref.Attributions = append(ref.Attributions, attr) - } + ref.Attributions = append(ref.Attributions, a.Attributions...) 
for _, fa := range fareattrs[a] { if fa.Agency == a { diff --git a/processors/frequencyminizer.go b/processors/frequencyminizer.go index aa5cc1f..58054e1 100755 --- a/processors/frequencyminizer.go +++ b/processors/frequencyminizer.go @@ -8,13 +8,14 @@ package processors import ( "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "math" "os" "sort" "strconv" "sync" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // FrequencyMinimizer minimizes trips, stop_times and frequencies by searching optimal covers for trip times. @@ -123,7 +124,7 @@ func (m FrequencyMinimizer) Run(feed *gtfsparser.Feed) { curTrip = new(gtfs.Trip) var newID string - for true { + for { newID = t.Id + "_" + strconv.FormatInt(int64(suffixC), 10) if _, in := feed.Trips[newID]; in { suffixC++ diff --git a/processors/idminimizer.go b/processors/idminimizer.go index abe611b..25e9781 100755 --- a/processors/idminimizer.go +++ b/processors/idminimizer.go @@ -8,10 +8,11 @@ package processors import ( "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "os" "strconv" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // IDMinimizer minimizes IDs by replacing them be continuous integer @@ -246,14 +247,14 @@ func (minimizer IDMinimizer) minimizeStopIds(feed *gtfsparser.Feed) { func (minimizer IDMinimizer) minimizeAttributionIds(feed *gtfsparser.Feed) { var idCount int64 = 1 - for i, _ := range feed.Attributions { + for i := range feed.Attributions { newId := minimizer.Prefix + strconv.FormatInt(idCount, minimizer.Base) feed.Attributions[i].Id = newId idCount = idCount + 1 } for _, ag := range feed.Agencies { - for i, _ := range ag.Attributions { + for i := range ag.Attributions { newId := minimizer.Prefix + strconv.FormatInt(idCount, minimizer.Base) ag.Attributions[i].Id = newId idCount = idCount + 1 @@ 
-261,7 +262,7 @@ func (minimizer IDMinimizer) minimizeAttributionIds(feed *gtfsparser.Feed) { } for _, r := range feed.Routes { - for i, _ := range r.Attributions { + for i := range r.Attributions { newId := minimizer.Prefix + strconv.FormatInt(idCount, minimizer.Base) r.Attributions[i].Id = newId idCount = idCount + 1 @@ -272,7 +273,7 @@ func (minimizer IDMinimizer) minimizeAttributionIds(feed *gtfsparser.Feed) { if t.Attributions == nil { continue } - for i, _ := range *t.Attributions { + for i := range *t.Attributions { newId := minimizer.Prefix + strconv.FormatInt(idCount, minimizer.Base) (*t.Attributions)[i].Id = newId idCount = idCount + 1 diff --git a/processors/routeduplicateremover.go b/processors/routeduplicateremover.go index d60b13c..e4e5c39 100755 --- a/processors/routeduplicateremover.go +++ b/processors/routeduplicateremover.go @@ -9,11 +9,12 @@ package processors import ( "encoding/binary" "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "hash/fnv" "os" "unsafe" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // RouteDuplicateRemover merges semantically equivalent routes @@ -187,9 +188,7 @@ func (rdr RouteDuplicateRemover) combineRoutes(feed *gtfsparser.Feed, routes []* } } - for _, attr := range r.Attributions { - ref.Attributions = append(ref.Attributions, attr) - } + ref.Attributions = append(ref.Attributions, r.Attributions...) 
// delete every fare rule that contains this route for _, fa := range feed.FareAttributes { diff --git a/processors/servicecaldatesremover.go b/processors/servicecaldatesremover.go index e742502..0b9ba5b 100755 --- a/processors/servicecaldatesremover.go +++ b/processors/servicecaldatesremover.go @@ -9,9 +9,10 @@ package processors import ( "errors" "fmt" + "os" + "github.com/public-transport/gtfsparser" gtfs "github.com/public-transport/gtfsparser/gtfs" - "os" ) // ServiceCalDatesRemover removes any entry in calendar_dates.txt by @@ -219,7 +220,7 @@ func (sm *ServiceCalDatesRem) freeTripId(feed *gtfsparser.Feed, prefix string) s return tid } } - panic(errors.New("Ran out of free trip ids.")) + panic(errors.New("ran out of free trip ids")) } // get a free service id with the given prefix @@ -231,5 +232,5 @@ func (sm *ServiceCalDatesRem) freeServiceId(feed *gtfsparser.Feed, prefix string return sid } } - panic(errors.New("Ran out of free service ids.")) + panic(errors.New("ran out of free service ids")) } diff --git a/processors/serviceduplicateremover.go b/processors/serviceduplicateremover.go index a269fd2..a05bee4 100755 --- a/processors/serviceduplicateremover.go +++ b/processors/serviceduplicateremover.go @@ -9,10 +9,11 @@ package processors import ( "encoding/binary" "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "hash/fnv" "os" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // ServiceDuplicateRemover removes duplicate services. 
Services are considered equal if they @@ -47,7 +48,7 @@ func (sdr ServiceDuplicateRemover) Run(feed *gtfsparser.Feed) { } sc := amaps[s] - eqServices := sdr.getEquivalentServices(s, amaps, feed, chunks[sc.hash]) + eqServices := sdr.getEquivalentServices(s, amaps, chunks[sc.hash]) if len(eqServices) > 0 { sdr.combineServices(feed, append(eqServices, s), trips) @@ -65,7 +66,7 @@ func (sdr ServiceDuplicateRemover) Run(feed *gtfsparser.Feed) { } // Return the services that are equivalent to service -func (m ServiceDuplicateRemover) getEquivalentServices(serv *gtfs.Service, amaps map[*gtfs.Service]ServiceCompressed, feed *gtfsparser.Feed, chunks [][]*gtfs.Service) []*gtfs.Service { +func (m ServiceDuplicateRemover) getEquivalentServices(serv *gtfs.Service, amaps map[*gtfs.Service]ServiceCompressed, chunks [][]*gtfs.Service) []*gtfs.Service { rets := make([][]*gtfs.Service, len(chunks)) sem := make(chan empty, len(chunks)) @@ -127,7 +128,7 @@ func (m ServiceDuplicateRemover) getActiveMaps(feed *gtfsparser.Feed) map[*gtfs. 
cur.start = first cur.end = last cur.activeMap = sm.getActiveOnMap(first.GetTime(), last.GetTime(), s) - cur.hash = m.serviceHash(cur.activeMap, first, last, s) + cur.hash = m.serviceHash(cur.activeMap, first, last) rets[j][s] = cur } @@ -177,7 +178,7 @@ func (m ServiceDuplicateRemover) getServiceChunks(feed *gtfsparser.Feed, amaps m return chunks } -func (m ServiceDuplicateRemover) serviceHash(active []bool, first gtfs.Date, last gtfs.Date, s *gtfs.Service) uint32 { +func (m ServiceDuplicateRemover) serviceHash(active []bool, first gtfs.Date, last gtfs.Date) uint32 { h := fnv.New32a() bls := boolsToBytes(active) diff --git a/processors/serviceminimizer.go b/processors/serviceminimizer.go index 3904cab..297b1cf 100755 --- a/processors/serviceminimizer.go +++ b/processors/serviceminimizer.go @@ -8,10 +8,11 @@ package processors import ( "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "os" "time" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // ServiceMinimizer minimizes services by finding optimal calendar.txt and @@ -162,7 +163,7 @@ out: continue } - c := sm.countExceptions(service, activeOn, d, startDiff, endDiff, a, b, e) + c := sm.countExceptions(activeOn, d, startDiff, endDiff, a, b, e) if c < e { e = c @@ -182,7 +183,7 @@ out: sm.updateService(service, bestMap, bestA, bestB, startTime, endTime, start, end) } -func (sm ServiceMinimizer) countExceptions(s *gtfs.Service, actmap []bool, bm uint, startDiff int, endDiff int, a int, b int, max uint) uint { +func (sm ServiceMinimizer) countExceptions(actmap []bool, bm uint, startDiff int, endDiff int, a int, b int, max uint) uint { ret := uint(0) l := len(actmap) diff --git a/processors/servicenonoverlapper.go b/processors/servicenonoverlapper.go index 3e4533c..392ed6e 100755 --- a/processors/servicenonoverlapper.go +++ b/processors/servicenonoverlapper.go @@ -8,12 +8,13 @@ package processors import ( "fmt" - 
"github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" - "golang.org/x/exp/slices" "os" "sort" "strconv" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" + "golang.org/x/exp/slices" ) type DayType struct { @@ -54,8 +55,8 @@ func (sm ServiceNonOverlapper) Run(feed *gtfsparser.Feed) { } } - for wd, _ := range days { - for day, _ := range days[wd] { + for wd := range days { + for day := range days[wd] { sort.Slice(days[wd][day], func(i, j int) bool { return days[wd][day][i].Id < days[wd][day][j].Id }) @@ -63,7 +64,7 @@ func (sm ServiceNonOverlapper) Run(feed *gtfsparser.Feed) { } // collect day types - for wd, _ := range days { + for wd := range days { for day, trips := range days[wd] { found := false for i, existing := range day_types[wd] { @@ -82,7 +83,7 @@ func (sm ServiceNonOverlapper) Run(feed *gtfsparser.Feed) { return len(day_types[wd][i].Dates) > len(day_types[wd][j].Dates) }) - for i, _ := range day_types[wd] { + for i := range day_types[wd] { sort.Slice(day_types[wd][i].Dates, func(a, b int) bool { return day_types[wd][i].Dates[a].GetTime().Before(day_types[wd][i].Dates[b].GetTime()) }) @@ -95,7 +96,7 @@ func (sm ServiceNonOverlapper) Run(feed *gtfsparser.Feed) { feed.StopTimesAddFlds = make(map[string]map[string]map[int]string) // write services - for wd, _ := range days { + for wd := range days { for _, t := range day_types[wd] { weeknums := make([]int, 0) for _, d := range t.Dates { @@ -108,7 +109,7 @@ func (sm ServiceNonOverlapper) Run(feed *gtfsparser.Feed) { if len(day_types[wd]) > 1 { id += " (" - for i, _ := range weeknums { + for i := range weeknums { if i == 0 { id += sm.YearWeekName + strconv.Itoa((weeknums[i])) continue diff --git a/processors/shapeduplicateremover.go b/processors/shapeduplicateremover.go index ac9060a..a3af057 100755 --- a/processors/shapeduplicateremover.go +++ b/processors/shapeduplicateremover.go @@ -8,10 +8,11 @@ package processors import 
( "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "math" "os" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // ShapeDuplicateRemover removes duplicate shapes @@ -70,7 +71,7 @@ func (sdr ShapeDuplicateRemover) Run(feed *gtfsparser.Feed) { if sdr.deleted[s] { continue } - eqShps := sdr.getEquShps(s, feed, chunkIdxs) + eqShps := sdr.getEquShps(s, chunkIdxs) if len(eqShps) > 0 { sdr.combineShapes(feed, append(eqShps, s), tidx) @@ -83,7 +84,7 @@ func (sdr ShapeDuplicateRemover) Run(feed *gtfsparser.Feed) { } // Return all shapes that are equivalent (within MaxEqDist) to shape -func (sdr *ShapeDuplicateRemover) getEquShps(shp *gtfs.Shape, feed *gtfsparser.Feed, idxs []*ShapeIdx) []*gtfs.Shape { +func (sdr *ShapeDuplicateRemover) getEquShps(shp *gtfs.Shape, idxs []*ShapeIdx) []*gtfs.Shape { rets := make([][]*gtfs.Shape, len(idxs)) sem := make(chan empty, len(idxs)) diff --git a/processors/shapeidx.go b/processors/shapeidx.go index c7e28bc..472a00b 100755 --- a/processors/shapeidx.go +++ b/processors/shapeidx.go @@ -7,8 +7,9 @@ package processors import ( - gtfs "github.com/public-transport/gtfsparser/gtfs" "math" + + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // ShapeIdx stores objects for fast nearest-neighbor @@ -137,7 +138,7 @@ func (gi *ShapeIdx) isects(x0, y0, x1, y1 float64, x, y uint) bool { ocode1 := gi.ocode(x1, y1, xmin, ymin, xmax, ymax) isect := false - for true { + for { if (ocode0 | ocode1) == 0 { return true } else if (ocode0 & ocode1) != 0 { diff --git a/processors/shapesnapper.go b/processors/shapesnapper.go index 98819ba..83b562e 100644 --- a/processors/shapesnapper.go +++ b/processors/shapesnapper.go @@ -9,10 +9,11 @@ package processors import ( "errors" "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "math" "os" + + "github.com/public-transport/gtfsparser" + gtfs 
"github.com/public-transport/gtfsparser/gtfs" ) // ShapeMinimizer minimizes shapes. @@ -88,6 +89,7 @@ func (sm ShapeSnapper) Run(feed *gtfsparser.Feed) { func (sm *ShapeSnapper) snapTo(stop *gtfs.Stop, distT float32, shape *gtfs.Shape) (float64, float64) { shp := sm.mercs[shape] + // TODO: No value is equal to NaN, not even NaN itself, so this condition is always true — is this if statement redundant? if float64(distT) != math.NaN() { for i := 1; i < len(shape.Points); i++ { if shape.Points[i].Dist_traveled <= distT && i < len(shape.Points) - 1 && shape.Points[i+1].Dist_traveled >= distT { @@ -134,5 +136,5 @@ func (sm *ShapeSnapper) freeStopId(feed *gtfsparser.Feed, suffix string) string return sid } } - panic(errors.New("Ran out of free stop ids.")) + panic(errors.New("ran out of free stop ids")) } diff --git a/processors/tripduplicateremover.go b/processors/tripduplicateremover.go index e558223..9d454b4 100755 --- a/processors/tripduplicateremover.go +++ b/processors/tripduplicateremover.go @@ -9,14 +9,15 @@ package processors import ( "encoding/binary" "fmt" - "github.com/public-transport/gtfsparser" - gtfs "github.com/public-transport/gtfsparser/gtfs" "hash/fnv" "os" "strconv" "strings" "time" "unsafe" + + "github.com/public-transport/gtfsparser" + gtfs "github.com/public-transport/gtfsparser/gtfs" ) // TripDuplicateRemover merges semantically equivalent routes @@ -188,9 +189,7 @@ func (m *TripDuplicateRemover) combineAdjTrips(feed *gtfsparser.Feed, ref *gtfs. sl := make([]*gtfs.Attribution, 0) ref.Attributions = &sl } - for _, attr := range *t.Attributions { - *ref.Attributions = append(*ref.Attributions, attr) - } + *ref.Attributions = append(*ref.Attributions, *t.Attributions...) 
} for fld, v := range feed.TripsAddFlds { @@ -227,9 +226,7 @@ func (m *TripDuplicateRemover) combineContTrips(feed *gtfsparser.Feed, ref *gtfs sl := make([]*gtfs.Attribution, 0) ref.Attributions = &sl } - for _, attr := range *t.Attributions { - *ref.Attributions = append(*ref.Attributions, attr) - } + *ref.Attributions = append(*ref.Attributions, *t.Attributions...) } feed.DeleteTrip(t.Id) @@ -249,9 +246,7 @@ func (m *TripDuplicateRemover) combineEqTrips(feed *gtfsparser.Feed, ref *gtfs.T sl := make([]*gtfs.Attribution, 0) ref.Attributions = &sl } - for _, attr := range *t.Attributions { - *ref.Attributions = append(*ref.Attributions, attr) - } + *ref.Attributions = append(*ref.Attributions, *t.Attributions...) } if ref.Bikes_allowed == 0 && t.Bikes_allowed > 0 { @@ -565,9 +560,7 @@ func (m *TripDuplicateRemover) getTripChunks(feed *gtfsparser.Feed) [][][]*gtfs. for hash := range trips { chunks[curchunk] = append(chunks[curchunk], make([]*gtfs.Trip, 0)) - for _, t := range trips[hash] { - chunks[curchunk][len(chunks[curchunk])-1] = append(chunks[curchunk][len(chunks[curchunk])-1], t) - } + chunks[curchunk][len(chunks[curchunk])-1] = append(chunks[curchunk][len(chunks[curchunk])-1], trips[hash]...) if len(chunks[curchunk]) == chunksize { curchunk++