package main

import (
	"context"
	"crypto/md5"
	_ "embed"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/oauth2/google"
	"golang.org/x/oauth2/jwt"
	drive "google.golang.org/api/drive/v3"
	option "google.golang.org/api/option"
)

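// remoteFile pairs a Drive file with its resolved parent folder so the
// relative on-disk path can be rebuilt from the flat listing.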
type remoteFile struct {
	File   *drive.File
	Parent *drive.File
}

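// syncedFile is one download work item: the remote file, its target path on
// disk, and the remote modification time to stamp onto the local copy.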
type syncedFile struct {
	File    *drive.File
	Path    string
	ModTime time.Time
}

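// configuration holds the updater-specific fields of config.json; the same
// file also carries the Google service-account credentials (see below).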
type configuration struct {
	RootID string `json:"root_id"`
	Title  string `json:"title"`
}

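// config.json is embedded at build time; it is parsed both as the
// service-account key for the Drive API and as the configuration struct above.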
//go:embed config.json
var googleServiceAccountConfiguration []byte

var appConf configuration
var apiConf *jwt.Config
var client *http.Client
var service *drive.Service

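// synchronize mirrors the service account's Drive contents into the current
// working directory: it lists every remote file, rebuilds the folder tree,
// compares size/mtime/MD5 against local copies, and downloads whatever is
// missing or stale, reporting progress through setProgress.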
func synchronize() {
	setProgress(0.00, "Initialization...", false)
	err := json.Unmarshal(googleServiceAccountConfiguration, &appConf)
	if err != nil {
		setProgress(1, err.Error(), false)
		return
	}
	setProgress(0.00, "Initialization... OK", true)
	setProgress(0.01, "Connection...", false)
	apiConf, err = google.JWTConfigFromJSON(googleServiceAccountConfiguration, "https://www.googleapis.com/auth/drive")
	if err != nil {
		setProgress(1, err.Error(), false)
		return
	}

	client = apiConf.Client(context.Background())
	service, err = drive.NewService(context.Background(), option.WithHTTPClient(client))
	if err != nil {
		setProgress(1, err.Error(), false)
		return
	}
	// ...
	remoteFiles := map[string]*remoteFile{}
	files := []syncedFile{}

	setProgress(0.01, "Connection... OK", true)
	setProgress(0.02, "Remote index...", false)
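	// Page through the whole Drive listing; the fields mask below is meant to
	// pull full file metadata (Md5Checksum, Size, Parents and ModifiedTime are
	// all used further down).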
	pageToken := ""
	for {
		q := service.Files.List().Fields("nextPageToken, files/*")
		// If we have a pageToken set, apply it to the query
		if pageToken != "" {
			q = q.PageToken(pageToken)
		}
		r, err := q.Do()
		if err != nil {
			setProgress(1, err.Error(), false)
			return
		}
		for _, file := range r.Files {
			remoteFiles[file.Id] = &remoteFile{
				Parent: nil,
				File:   file,
			}
			//setProgress(0.02, fmt.Sprintf("Indexing remote files... %s", file.Name), true)
		}
		pageToken = r.NextPageToken
		if pageToken == "" {
			break
		}
	}
	setProgress(0.02, "Remote index... OK", true)
	processName := filepath.Base(os.Args[0])

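	// Resolve each file's parent pointer so relative paths can be rebuilt
	// below; rootID falls back to the first parentless entry when config.json
	// does not set one.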
	setProgress(0.03, "File tree...", false)
	rootID := appConf.RootID
	for _, remoteFile := range remoteFiles {
		if len(remoteFile.File.Parents) == 0 {
			if rootID == "" {
				rootID = remoteFile.File.Id
			}
		} else {
			for _, parentID := range remoteFile.File.Parents {
				// The parent may not appear in the listing (e.g. a folder shared
				// outside the service account's scope); skip it rather than
				// dereference a nil map entry.
				if parent, ok := remoteFiles[parentID]; ok {
					remoteFile.Parent = parent.File
				}
			}
		}
	}
	setProgress(0.03, "File tree... OK", true)
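	// Decide what needs downloading: a file is queued when it is missing or
	// empty locally, when the remote copy is newer, or when the MD5 checksums
	// differ.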
	setProgress(0.04, "Comparison...", false)
	layout := "2006-01-02T15:04:05.000Z" // Drive reports RFC 3339 timestamps with millisecond precision
	loadedSize := uint64(0)
	loadedCount := int64(0)
	totalSize := int64(0)
	totalCount := 0
	for _, remoteFile := range remoteFiles {
		if remoteFile.File.MimeType == "application/vnd.google-apps.folder" || remoteFile.File.Name == processName || remoteFile.File.Name == "FOnlineUpdater.cfg" {
			continue
		}
		pathParts := []string{}
		scope := remoteFile
		for scope.Parent != nil {
			pathParts = append([]string{scope.File.Name}, pathParts...)
			scope = remoteFiles[scope.Parent.Id]
		}
		filePath := filepath.Join(pathParts...)
		if filePath == "" {
			continue
		}
		fileSize, fileModTime, fileError := getFileStats(filePath)
		remoteLastModified, err := time.Parse(layout, remoteFile.File.ModifiedTime)
		if err != nil {
			setProgress(1, err.Error(), false)
			return
		}
		shouldDownload := fileError != nil || fileSize == 0 || remoteLastModified.After(fileModTime)
		if !shouldDownload {
			fileMD5 := getFileMd5(filePath)
			shouldDownload = fileMD5 == "" || fileMD5 != remoteFile.File.Md5Checksum
		}
		if shouldDownload {
			files = append(files, syncedFile{
				File:    remoteFile.File,
				Path:    filePath,
				ModTime: remoteLastModified,
			})
			totalSize += remoteFile.File.Size
			totalCount++
		}
	}
	// ...
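	// Sort the work list by descending size so the largest downloads start first.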
	sort.Slice(files, func(i, j int) bool {
		return files[i].File.Size > files[j].File.Size
	})
	// interval throttles how quickly download goroutines are started; it is
	// shared with those goroutines, so it is stored as nanoseconds and only
	// accessed atomically.
	interval := int64(500 * time.Millisecond)
	wg := sync.WaitGroup{}
	setProgress(0.04, "Comparison... OK", true)
	setProgress(0.05, fmt.Sprintf("Synchronization... %d/%d", 0, totalCount), false)
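	// One goroutine per file, started at a throttled rate. The shared progress
	// counters (loadedSize, loadedCount) and the start interval are updated
	// atomically because several downloads run at once.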
	for _, sFile := range files {
		time.Sleep(time.Duration(atomic.LoadInt64(&interval)))
		wg.Add(1)
		go func(realPath string, tmpPath string, id string, mod time.Time) {
			// Always release the WaitGroup, even on the early error returns,
			// so wg.Wait() below cannot block forever.
			defer wg.Done()
			t1 := time.Now()
			if err := os.MkdirAll(filepath.Dir(realPath), os.ModePerm); err != nil {
				setProgress(1, err.Error(), false)
				return
			}
			resp, err := service.Files.Get(id).Download()
			if err != nil {
				setProgress(1, err.Error(), false)
				return
			}
			// Download to a temporary file first, then rename into place so a
			// failed transfer never leaves a truncated file behind.
			out, err := os.Create(tmpPath)
			if err != nil {
				resp.Body.Close()
				setProgress(1, err.Error(), false)
				return
			}
			prevSize := uint64(0)
			counter := &WriteCounter{
				Logger: func(n uint64) {
					// n is the cumulative byte count for this file; add only the
					// delta to the shared total.
					atomic.AddUint64(&loadedSize, n-prevSize)
					prevSize = n
					done := atomic.LoadUint64(&loadedSize)
					count := atomic.LoadInt64(&loadedCount)
					setProgress(float64(done)/float64(totalSize)*0.95+0.05, fmt.Sprintf("Synchronization... %d/%d", count, totalCount), true)
				},
			}
			if _, err = io.Copy(out, io.TeeReader(resp.Body, counter)); err != nil {
				out.Close()
				resp.Body.Close()
				setProgress(1, err.Error(), false)
				return
			}
			// Close before renaming: on Windows an open file cannot be renamed.
			out.Close()
			resp.Body.Close()
			if err = os.Rename(tmpPath, realPath); err != nil {
				setProgress(1, err.Error(), false)
				return
			}
			if err = os.Chtimes(realPath, mod, mod); err != nil {
				setProgress(1, err.Error(), false)
				return
			}
			atomic.AddInt64(&loadedCount, 1)
			// Shrink the start interval toward the fastest observed download.
			if diff := int64(time.Since(t1)); diff < atomic.LoadInt64(&interval) {
				atomic.StoreInt64(&interval, diff)
			}
		}(sFile.Path, sFile.Path+".tmp", sFile.File.Id, sFile.ModTime)
	}
	wg.Wait()
	setProgress(0.05, "Synchronization... OK", true)
	setProgress(1, "All files up to date!", false)
}

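// getFileStats returns the size and modification time of a local file, or a
// non-nil error when it cannot be stat'ed (typically because it does not
// exist yet).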
func getFileStats(filePath string) (int64, time.Time, error) {
	stat, err := os.Stat(filePath)
	if err != nil {
		// Covers both "does not exist" and any other stat failure; the caller
		// treats any error as "download this file".
		return 0, time.Now(), err
	}
	return stat.Size(), stat.ModTime(), nil
}

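// getFileMd5 returns the hex-encoded MD5 of a local file, matching the format
// of drive.File.Md5Checksum, or an empty string if the file cannot be read.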
func getFileMd5(filePath string) string {
	f, err := os.Open(filePath)
	if err != nil {
		return ""
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return ""
	}

	// Hex-encode the sum; Drive's Md5Checksum is a lowercase hex string, so a
	// raw []byte-to-string conversion would never match it.
	return fmt.Sprintf("%x", h.Sum(nil))
}

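// WriteCounter counts bytes written through it and reports the running total
// to Logger after every write; it is used with io.TeeReader to track download
// progress.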
type WriteCounter struct {
	Total  uint64
	Logger func(uint64)
}

func (wc *WriteCounter) Write(p []byte) (int, error) {
	n := len(p)
	wc.Total += uint64(n)
	wc.Logger(wc.Total)
	return n, nil
}