// Copyright 2018 Fedir RYKHTIK. All rights reserved.
// Use of this source code is governed by the GNU GPL 3.0
// license that can be found in the LICENSE file.
package main

import (
	"encoding/csv"
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/fedir/ghstat/httpcache"
	"github.com/fedir/ghstat/timing"
)

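// main parses the command-line flags and either performs a maintenance action
// (clearing the HTTP cache, printing the current GitHub API rate limits) or
// collects statistics for the selected repositories concurrently, rates them
// and writes the result to a CSV file.
// Example invocation (binary name assumed from the module path):
//	ghstat -r owner/repo1,owner/repo2 -f result.csv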
func main() {
	var (
		clearHTTPCache         = flag.Bool("cc", false, "Clear HTTP cache")
		clearHTTPCacheDryRun   = flag.Bool("ccdr", false, "Clear HTTP cache (dry run)")
		debug                  = flag.Bool("d", false, "Debug mode")
		resultFileSavePath     = flag.String("f", "", "File path where result CSV file will be saved")
		rateLimitCheck         = flag.Bool("l", false, "Rate limit check")
		repositoriesKeysManual = flag.String("r", "", "Repositories keys")
		tmpFolder              = flag.String("t", "test_data", "Temporary folder path for the HTTP cache")
		repositoriesKeys       = []string{}
	)
	flag.Parse()
	if *clearHTTPCache || *clearHTTPCacheDryRun {
		clearHTTPCacheFolder(*tmpFolder, *clearHTTPCacheDryRun)
		os.Exit(0)
	}
	if *rateLimitCheck {
		checkAndPrintRateLimit()
		os.Exit(0)
	}
	if *repositoriesKeysManual != "" {
		repositoriesKeys = strings.Split(*repositoriesKeysManual, ",")
	} else {
		repositoriesKeys = []string{
			"astaxie/beego",
			"gohugoio/hugo",
			"gin-gonic/gin",
			"labstack/echo",
			"revel/revel",
			"gobuffalo/buffalo",
			"go-chi/chi",
			"kataras/iris",
		}
	}
	csvFilePath := "result.csv"
	if *resultFileSavePath != "" {
		csvFilePath = *resultFileSavePath
	}
	var ghData = [][]string{}
	headers := []string{
		"Name",
		"URL",
		"Author",
		"Author's followers",
		"Top 10 contributors followers",
		"Created at",
		"Age in days",
		"Total commits",
		"Total additions",
		"Total deletions",
		"Total code changes",
		"Medium commit size",
		"Stargazers",
		"Forks",
		"Contributors",
		"Active forkers, %",
		"Open issues",
		"Total issues",
		"Closed issues, %",
		"Place",
	}
	ghDataColumnIndexes := map[string]int{
		"nameColumn":                       0,
		"authorsFollowersColumn":           3,
		"top10ContributorsFollowersColumn": 4,
		"totalAdditionsColumn":             8,
		"ageColumn":                        6,
		"totalCommitsColumn":               7,
		"totalDeletionsColumn":             9,
		"totalCodeChangesColumn":           10,
		"mediCommitSizeColumn":             11,
		"stargazersColumn":                 12,
		"activeForkersColumn":              15,
		"closedIssuesPercentageColumn":     18,
		"totalPointsColumnIndex":           19,
	}
	wg := &sync.WaitGroup{}
	dataChan := make(chan []string, len(repositoriesKeys))
	// Fetch the statistics for each repository concurrently.
	for _, rKey := range repositoriesKeys {
		wg.Add(1)
		go fillRepositoryStatistics(rKey, *tmpFolder, *debug, wg, dataChan)
	}
	// Collect exactly one result row per repository from the channel.
	for range repositoriesKeys {
		ghData = append(ghData, <-dataChan)
	}
	wg.Wait()
	rateGhData(ghData, ghDataColumnIndexes)
	writeCsv(csvFilePath, headers, ghData)
}

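// fillRepositoryStatistics collects the statistics of a single repository
// (author, followers, issues, contributors, commit statistics) and sends them
// to dataChan as one formatted CSV row.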
func fillRepositoryStatistics(rKey string, tmpFolder string, debug bool, wg *sync.WaitGroup, dataChan chan []string) {
	defer wg.Done()
	repositoryData := getRepositoryStatistics(rKey, tmpFolder, debug)
	authorLogin := getRepositoryCommits(rKey, tmpFolder, debug)
	authorFollowers := 0
	if authorLogin != "" {
		authorFollowers = getUserFollowers(authorLogin, tmpFolder, debug)
	}
	closedIssues := getRepositoryClosedIssues(rKey, tmpFolder, debug)
	topContributorsFollowers, totalContributors := getRepositoryContributors(rKey, tmpFolder, debug)
	activeForkersPercentage := getActiveForkersPercentage(totalContributors, repositoryData.Forks)
	closedIssuesPercentage := getClosedIssuesPercentage(repositoryData.OpenIssues, int(closedIssues))
	contributionStatistics := getContributionStatistics(rKey, tmpFolder, debug)
	ghProjectData := []string{
		repositoryData.Name,
		fmt.Sprintf("https://github.com/%s", repositoryData.FullName),
		func(a string) string {
			if a == "" {
				a = "[Account removed]"
			}
			return a
		}(authorLogin),
		fmt.Sprintf("%d", authorFollowers),
		fmt.Sprintf("%d", topContributorsFollowers),
		fmt.Sprintf("%d/%02d", repositoryData.CreatedAt.Year(), repositoryData.CreatedAt.Month()),
		fmt.Sprintf("%d", int(time.Since(repositoryData.CreatedAt).Seconds()/86400)),
		fmt.Sprintf("%d", contributionStatistics.TotalCommits),
		fmt.Sprintf("%d", contributionStatistics.TotalAdditions),
		fmt.Sprintf("%d", contributionStatistics.TotalDeletions),
		fmt.Sprintf("%d", contributionStatistics.TotalCodeChanges),
		fmt.Sprintf("%d", contributionStatistics.MediumCommitSize),
		fmt.Sprintf("%d", repositoryData.Watchers),
		fmt.Sprintf("%d", repositoryData.Forks),
		fmt.Sprintf("%d", totalContributors),
		fmt.Sprintf("%.2f", activeForkersPercentage),
		fmt.Sprintf("%d", repositoryData.OpenIssues),
		fmt.Sprintf("%d", closedIssues+repositoryData.OpenIssues),
		fmt.Sprintf("%.2f", closedIssuesPercentage),
		"0",
	}
	dataChan <- ghProjectData
}

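// clearHTTPCacheFolder removes every entry inside the HTTP cache folder.
// With dryRun set it only reports what would be removed.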
func clearHTTPCacheFolder(tmpFolderPath string, dryRun bool) error {
	d, err := os.Open(tmpFolderPath)
	if err != nil {
		log.Fatalf("Could not open %s", tmpFolderPath)
	}
	defer d.Close()
	names, err := d.Readdirnames(-1)
	if err != nil {
		log.Fatalf("Could not read from %s", tmpFolderPath)
	}
	for _, name := range names {
		fp := filepath.Join(tmpFolderPath, name)
		if dryRun {
			fmt.Printf("Would delete %s\n", fp)
		} else {
			err = os.RemoveAll(fp)
			if err != nil {
				log.Fatalf("Could not remove %s", fp)
			}
			fmt.Printf("Deleted %s\n", fp)
		}
	}
	return nil
}

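// checkAndPrintRateLimit queries the GitHub /rate_limit endpoint and prints
// the remaining core, search, GraphQL and overall rate limits together with
// their reset times.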
func checkAndPrintRateLimit() {
	type RateLimits struct {
		Resources struct {
			Core struct {
				Limit     int `json:"limit"`
				Remaining int `json:"remaining"`
				Reset     int `json:"reset"`
			} `json:"core"`
			Search struct {
				Limit     int `json:"limit"`
				Remaining int `json:"remaining"`
				Reset     int `json:"reset"`
			} `json:"search"`
			GraphQL struct {
				Limit     int `json:"limit"`
				Remaining int `json:"remaining"`
				Reset     int `json:"reset"`
			} `json:"graphql"`
		} `json:"resources"`
		Rate struct {
			Limit     int `json:"limit"`
			Remaining int `json:"remaining"`
			Reset     int `json:"reset"`
		} `json:"rate"`
	}
	url := "https://api.github.com/rate_limit"
	resp, statusCode, err := httpcache.MakeHTTPRequest(url)
	if err != nil {
		log.Fatalf("Error during rate limit check: %d %v", statusCode, err)
	}
	jsonResponse, _, _ := httpcache.ReadResp(resp)
	rateLimits := RateLimits{}
	if err := json.Unmarshal(jsonResponse, &rateLimits); err != nil {
		log.Fatalf("Could not parse rate limit response: %v", err)
	}
	fmt.Printf("Core: %d/%d (reset in %d minutes)\n", rateLimits.Resources.Core.Remaining, rateLimits.Resources.Core.Limit, timing.GetRelativeTime(rateLimits.Resources.Core.Reset))
	fmt.Printf("Search: %d/%d (reset in %d minutes)\n", rateLimits.Resources.Search.Remaining, rateLimits.Resources.Search.Limit, timing.GetRelativeTime(rateLimits.Resources.Search.Reset))
	fmt.Printf("GraphQL: %d/%d (reset in %d minutes)\n", rateLimits.Resources.GraphQL.Remaining, rateLimits.Resources.GraphQL.Limit, timing.GetRelativeTime(rateLimits.Resources.GraphQL.Reset))
	fmt.Printf("Rate: %d/%d (reset in %d minutes)\n", rateLimits.Rate.Remaining, rateLimits.Rate.Limit, timing.GetRelativeTime(rateLimits.Rate.Reset))
}

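// writeCsv writes the header row followed by one row per repository to the
// CSV file at csvFilePath.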
func writeCsv(csvFilePath string, headers []string, ghData [][]string) {
	file, err := os.Create(csvFilePath)
	if err != nil {
		log.Fatal("Cannot create file: ", err)
	}
	defer file.Close()
	writer := csv.NewWriter(file)
	defer writer.Flush()
	err = writer.Write(headers)
	if err != nil {
		log.Fatal("Cannot write to file: ", err)
	}
	for _, value := range ghData {
		err := writer.Write(value)
		if err != nil {
			log.Fatal("Cannot write to file: ", err)
		}
	}
}