mirror of https://github.com/vitalif/grive2
Compare commits
170 Commits
Author | SHA1 | Date |
---|---|---|
Vitaliy Filippov | be52cb21a7 | |
Vitaliy Filippov | 648ff8eea1 | |
Vitaliy Filippov | eb82bfe28b | |
Vitaliy Filippov | f9e9fe510d | |
Vitaliy Filippov | ae38035ef4 | |
Christoph Junghans | b788284020 | |
Kilian von Pflugk | cd4665ae1b | |
Vitaliy Filippov | d03c4a24ce | |
Vitaliy Filippov | 328987ec34 | |
Vitaliy Filippov | 6645206d27 | |
Jasper Young | 5c8e87ee9a | |
Christoph Junghans | 3cf1c058a3 | |
Vitaliy Filippov | 6901fbb169 | |
ncaq | 48f5f0e52f | |
Vitaliy Filippov | e6fcc637f8 | |
Vitaliy Filippov | dedb762b74 | |
Vitaliy Filippov | d55fe7f5eb | |
Agustin Alexander | 03a2c58403 | |
Vitaliy Filippov | b112fccd37 | |
Vitaliy Filippov | cb3c586862 | |
Vitaliy Filippov | 3a28149bad | |
Vitaliy Filippov | e36f362abb | |
Thomas Klausner | 30763e2f9d | |
Thomas Klausner | 814b724cc4 | |
Jan Katins | 050fcf53a0 | |
Jan Katins | 9076e3aed2 | |
Jan Katins | 27ca714e56 | |
Jan Katins | e0965f7fb8 | |
Jan Katins | 5510ca1b06 | |
Jan Katins | 4dfc9a5461 | |
Jan Katins | 378df2867b | |
Jan Schulz | 0b53b08f57 | |
Jan Katins | d7008b81c1 | |
Jan Schulz | 1bab757298 | |
Jan Schulz | c7a949dedd | |
Agustin Alexander | a8b84930f9 | |
crborga | 97a97265fd | |
Michael Bartlett | 7b42e2da5b | |
Vitaliy Filippov | ffb744a59b | |
Vitaliy Filippov | 03bbc20892 | |
Vitaliy Filippov | 1456e6e0ba | |
Vitaliy Filippov | 2ae61ab1e6 | |
Vitaliy Filippov | a295641cb0 | |
Vitaliy Filippov | 2ddc0230da | |
Vitaliy Filippov | 58a689a3db | |
Vitaliy Filippov | af2a6de268 | |
Vitaliy Filippov | fa3c39480a | |
lmmamercy | dc1946498d | |
Daniel Deptford | 6aeec25778 | |
Giuseppe Corbelli | b4d6ac055f | |
Vitaliy Filippov | a3cce71643 | |
Vitaliy Filippov | 37df02adff | |
AmonRaNet | 94182975db | |
Frank Bicknell | de6404b246 | |
Vitaliy Filippov | bdcb76efc0 | |
AmonRaNet | 79312b9c53 | |
daniel | ad7844ba29 | |
Vitaliy Filippov | 39299096cf | |
Vitaliy Filippov | 548ea362f3 | |
Vitaliy Filippov | 13ac9bd1a9 | |
Gianluca Recchia | fe17d715fc | |
Vitaliy Filippov | f039e38c4c | |
Vitaliy Filippov | 294b1b1ded | |
Vitaliy Filippov | 00d5148c19 | |
Jan Schulz | d4742fd470 | |
Andrew Udvare | 6354689a93 | |
Andrew Udvare | 7ef50e9523 | |
Tatsh | e972a77790 | |
Andrew Udvare | 8e3c3d25cb | |
Jan Schulz | f9cad3b635 | |
Jan Schulz | e3e18fe16a | |
Alex Martin | 60ecd5b0ff | |
Vitaliy Filippov | cf51167b55 | |
a martin | d4a0445873 | |
Vitaliy Filippov | cbac85a8e5 | |
Vitaliy Filippov | 93cae255fa | |
Vitaliy Filippov | 98d5b92e5d | |
Vitaliy Filippov | f516356d33 | |
Vitaliy Filippov | 34d4582688 | |
Vitaliy Filippov | 6fe5f0c89b | |
Vitaliy Filippov | 6b03e244df | |
Vitaliy Filippov | ad9b66a92c | |
Vitaliy Filippov | 8384f0c983 | |
Vitaliy Filippov | 59499327e2 | |
psfloyd | b47dd70f35 | |
Topher Sterling | f19406e57d | |
Topher Sterling | f0e38924d0 | |
Topher Sterling | 1443dd9f18 | |
Topher Sterling | f5e1b67b4c | |
Topher Sterling | 48bfe66bab | |
Topher Sterling | fddabe760f | |
Topher Sterling | 80bbe5f940 | |
Jan Katins | 2de114b212 | |
Jan Katins | cdea48edc1 | |
Carlos J | b06f66c86d | |
Carlos J | bff462211a | |
Pavel Roskin | 8f2f217da6 | |
Pavel Roskin | 2e75fc669b | |
psfloyd | d698d596d9 | |
psfloyd | 94bda77b1b | |
psfloyd | 70c5c64373 | |
Vitaliy Filippov | 84c57c121e | |
Vitaliy Filippov | 2155b755c4 | |
Vitaliy Filippov | 76d76005ff | |
Vitaliy Filippov | dcbdbfcd1e | |
Vitaliy Filippov | 262edd71cc | |
Vitaliy Filippov | d84e0d9b9c | |
Vitaliy Filippov | 4e5c61b668 | |
Vitaliy Filippov | 5561c0e847 | |
Vasilis | 81a7c34c22 | |
Daniel Deptford | 31b5ab59fc | |
Vitaliy Filippov | 3c90425b8f | |
Mitos Kalandiel | 90c603dc4c | |
Vitaliy Filippov | 4fe1e71d5b | |
Jan Schulz | d996989c29 | |
Vitaliy Filippov | 4b6cf69cbb | |
Vitaliy Filippov | 63bb138b2d | |
Vitaliy Filippov | a0aff5b146 | |
Vitaliy Filippov | e8a8801c75 | |
Jan | 974733ff46 | |
Doron Behar | bddf8cf6b0 | |
Albrecht Scheidig | f8ea376769 | |
Rafael Sadowski | 150a817628 | |
Rafael Sadowski | 32f62c0850 | |
Vitaliy Filippov | 7bbb01c301 | |
Vitaliy Filippov | 04b86d1c87 | |
Vitaliy Filippov | 08e29070c2 | |
Vitaliy Filippov | e3f948496d | |
Vitaliy Filippov | 68e0e5afe5 | |
Vitaliy Filippov | 67b5b05e17 | |
Blackrabbit | f27e3724de | |
Vitaliy Filippov | 11a3d788d0 | |
Vitaliy Filippov | dd77c99872 | |
Vitaliy Filippov | 59d02a65cb | |
Vitaliy Filippov | d35c849468 | |
Vitaliy Filippov | 199a050099 | |
Vitaliy Filippov | 195e5091c7 | |
Vitaliy Filippov | a756414e71 | |
Vitaliy Filippov | e91a2b598b | |
Vitaliy Filippov | 457d849745 | |
Vitaliy Filippov | fbf8f1663f | |
Vitaliy Filippov | 2727160257 | |
Vitaliy Filippov | 76827a760c | |
Vitaliy Filippov | 46dfa1abfa | |
Vitaliy Filippov | 1bd86307c6 | |
Vitaliy Filippov | 44d3ddf928 | |
Vitaliy Filippov | 5327016d36 | |
Vitaliy Filippov | 62e26118f2 | |
Vitaliy Filippov | 40e33cb524 | |
Vitaliy Filippov | cfb8ff08b3 | |
Vitaliy Filippov | ae06eccb38 | |
Vitaliy Filippov | 98416354f7 | |
Vitaliy Filippov | 86acd18978 | |
Vitaliy Filippov | 4a4e22026b | |
Vitaliy Filippov | b6c0013052 | |
Vitaliy Filippov | 60acb75967 | |
Vitaliy Filippov | c76cdecad2 | |
Vitaliy Filippov | 0112330c1d | |
Vitaliy Filippov | d606a360be | |
Vitaliy Filippov | 9d8c77d0bd | |
Vitaliy Filippov | 3b9aa4f2aa | |
Vitaliy Filippov | ca4a0b6b80 | |
Josua Mayer | b49a89ad34 | |
Vitaliy Filippov | c647c5f89f | |
Vitaliy Filippov | 00311e8365 | |
Vitaliy Filippov | 23fa985bdb | |
Vitaliy Filippov | 8f640ebdad | |
Vitaliy Filippov | af05c7c626 | |
Vitaliy Filippov | 4edff0a816 | |
Vitaliy Filippov | 5381919e5b |
|
@ -0,0 +1,5 @@
|
||||||
|
*
|
||||||
|
!cmake
|
||||||
|
!grive
|
||||||
|
!libgrive
|
||||||
|
!CMakeLists.txt
|
|
@ -14,3 +14,14 @@ bgrive/bgrive
|
||||||
grive/grive
|
grive/grive
|
||||||
libgrive/btest
|
libgrive/btest
|
||||||
*.cmake
|
*.cmake
|
||||||
|
|
||||||
|
debian/debhelper-build-stamp
|
||||||
|
debian/files
|
||||||
|
debian/grive.debhelper.log
|
||||||
|
debian/grive.substvars
|
||||||
|
debian/grive/
|
||||||
|
debian/.debhelper
|
||||||
|
|
||||||
|
obj-x86_64-linux-gnu/
|
||||||
|
|
||||||
|
.idea
|
||||||
|
|
|
@ -1,11 +1,27 @@
|
||||||
cmake_minimum_required(VERSION 2.8)
|
cmake_minimum_required(VERSION 2.8)
|
||||||
|
project(grive2)
|
||||||
|
|
||||||
|
include(GNUInstallDirs)
|
||||||
|
|
||||||
# Grive version. remember to update it for every new release!
|
# Grive version. remember to update it for every new release!
|
||||||
set( GRIVE_VERSION "0.4.2" )
|
set( GRIVE_VERSION "0.5.3" CACHE STRING "Grive version" )
|
||||||
|
message(WARNING "Version to build: ${GRIVE_VERSION}")
|
||||||
|
|
||||||
# common compile options
|
# common compile options
|
||||||
add_definitions( -DVERSION="${GRIVE_VERSION}" )
|
add_definitions( -DVERSION="${GRIVE_VERSION}" )
|
||||||
add_definitions( -D_FILE_OFFSET_BITS=64 )
|
add_definitions( -D_FILE_OFFSET_BITS=64 -std=c++0x )
|
||||||
|
if ( APPLE )
|
||||||
|
add_definitions( -Doff64_t=off_t )
|
||||||
|
endif ( APPLE )
|
||||||
|
|
||||||
|
find_program(
|
||||||
|
HAVE_SYSTEMD systemd
|
||||||
|
PATHS /lib/systemd /usr/lib/systemd
|
||||||
|
NO_DEFAULT_PATH
|
||||||
|
)
|
||||||
|
if ( HAVE_SYSTEMD )
|
||||||
|
add_subdirectory( systemd )
|
||||||
|
endif( HAVE_SYSTEMD )
|
||||||
|
|
||||||
add_subdirectory( libgrive )
|
add_subdirectory( libgrive )
|
||||||
add_subdirectory( grive )
|
add_subdirectory( grive )
|
||||||
|
|
|
@ -0,0 +1,25 @@
|
||||||
|
FROM alpine:3.7 as build
|
||||||
|
|
||||||
|
RUN apk add make cmake g++ libgcrypt-dev yajl-dev yajl \
|
||||||
|
boost-dev curl-dev expat-dev cppunit-dev binutils-dev \
|
||||||
|
pkgconfig
|
||||||
|
|
||||||
|
ADD . /grive2
|
||||||
|
|
||||||
|
RUN mkdir /grive2/build \
|
||||||
|
&& cd /grive2/build \
|
||||||
|
&& cmake .. \
|
||||||
|
&& make -j4 install
|
||||||
|
|
||||||
|
FROM alpine:3.7
|
||||||
|
|
||||||
|
RUN apk add yajl libcurl libgcrypt boost-program_options boost-regex libstdc++ boost-system \
|
||||||
|
&& apk add boost-filesystem --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
|
||||||
|
|
||||||
|
COPY --from=build /usr/local/bin/grive /bin/grive
|
||||||
|
RUN chmod 777 /bin/grive \
|
||||||
|
&& mkdir /data
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
WORKDIR /data
|
||||||
|
ENTRYPOINT grive
|
205
README.md
205
README.md
|
@ -1,14 +1,14 @@
|
||||||
# Grive2 0.4.2
|
# Grive2 0.5.3
|
||||||
|
|
||||||
28 Dec 2015, Vitaliy Filippov
|
09 Nov 2022, Vitaliy Filippov
|
||||||
|
|
||||||
http://yourcmc.ru/wiki/Grive2
|
http://yourcmc.ru/wiki/Grive2
|
||||||
|
|
||||||
This is the fork of original "Grive" (https://github.com/Grive/grive) Google Drive client
|
This is the fork of original "Grive" (https://github.com/Grive/grive) Google Drive client
|
||||||
with the support for the new Drive REST API and partial sync.
|
with the support for the new Drive REST API and partial sync.
|
||||||
|
|
||||||
Grive can be considered still beta or pre-beta quality. It simply downloads all the files in your
|
Grive simply downloads all the files in your Google Drive into the current directory.
|
||||||
Google Drive into the current directory. After you make some changes to the local files, run
|
After you make some changes to the local files, run
|
||||||
grive again and it will upload your changes back to your Google Drive. New files created locally
|
grive again and it will upload your changes back to your Google Drive. New files created locally
|
||||||
or in Google Drive will be uploaded or downloaded respectively. Deleted files will also be "removed".
|
or in Google Drive will be uploaded or downloaded respectively. Deleted files will also be "removed".
|
||||||
Currently Grive will NOT destroy any of your files: it will only move the files to a
|
Currently Grive will NOT destroy any of your files: it will only move the files to a
|
||||||
|
@ -16,11 +16,135 @@ directory named .trash or put them in the Google Drive trash. You can always rec
|
||||||
|
|
||||||
There are a few things that Grive does not do at the moment:
|
There are a few things that Grive does not do at the moment:
|
||||||
- continously wait for changes in file system or in Google Drive to occur and upload.
|
- continously wait for changes in file system or in Google Drive to occur and upload.
|
||||||
A sync is only performed when you run Grive, and it calculates checksums for all files every time.
|
A sync is only performed when you run Grive (there are workarounds for almost
|
||||||
|
continuous sync. See below).
|
||||||
- symbolic links support.
|
- symbolic links support.
|
||||||
- support for Google documents.
|
- support for Google documents.
|
||||||
|
|
||||||
These may be added in the future, possibly the next release.
|
These may be added in the future.
|
||||||
|
|
||||||
|
Enjoy!
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
When Grive is run for the first time, you should use the "-a" argument to grant
|
||||||
|
permission to Grive to access to your Google Drive:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd $HOME
|
||||||
|
mkdir google-drive
|
||||||
|
cd google-drive
|
||||||
|
grive -a
|
||||||
|
```
|
||||||
|
|
||||||
|
A URL should be printed. Go to the link. You will need to login to your Google
|
||||||
|
account if you haven't done so. After granting the permission to Grive, the
|
||||||
|
authorization code will be forwarded to the Grive application and you will be
|
||||||
|
redirected to a localhost web page confirming the authorization.
|
||||||
|
|
||||||
|
If everything works fine, Grive will create .grive and .grive\_state files in your
|
||||||
|
current directory. It will also start downloading files from your Google Drive to
|
||||||
|
your current directory.
|
||||||
|
|
||||||
|
To resync the direcory, run `grive` in the folder.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd $HOME/google-drive
|
||||||
|
grive
|
||||||
|
```
|
||||||
|
|
||||||
|
### Exclude specific files and folders from sync: .griveignore
|
||||||
|
|
||||||
|
Rules are similar to Git's .gitignore, but may differ slightly due to the different
|
||||||
|
implementation.
|
||||||
|
|
||||||
|
- lines that start with # are comments
|
||||||
|
- leading and trailing spaces ignored unless escaped with \
|
||||||
|
- non-empty lines without ! in front are treated as "exclude" patterns
|
||||||
|
- non-empty lines with ! in front are treated as "include" patterns
|
||||||
|
and have a priority over all "exclude" ones
|
||||||
|
- patterns are matched against the filenames relative to the grive root
|
||||||
|
- a/**/b matches any number of subpaths between a and b, including 0
|
||||||
|
- **/a matches `a` inside any directory
|
||||||
|
- b/** matches everything inside `b`, but not b itself
|
||||||
|
- \* matches any number of any characters except /
|
||||||
|
- ? matches any character except /
|
||||||
|
- .griveignore itself isn't ignored by default, but you can include it in itself to ignore
|
||||||
|
|
||||||
|
|
||||||
|
### Scheduled syncs and syncs on file change events
|
||||||
|
|
||||||
|
There are tools which you can use to enable both scheduled syncs and syncs
|
||||||
|
when a file changes. Together these gives you an experience almost like the
|
||||||
|
Google Drive clients on other platforms (it misses the almost instantious
|
||||||
|
download of changed files in the google drive).
|
||||||
|
|
||||||
|
Grive installs such a basic solution which uses inotify-tools together with
|
||||||
|
systemd timer and services. You can enable it for a folder in your `$HOME`
|
||||||
|
directory (in this case the `$HOME/google-drive`):
|
||||||
|
|
||||||
|
First install the `inotify-tools` (seems to be named like that in all major distros):
|
||||||
|
test that it works by calling `inotifywait -h`.
|
||||||
|
|
||||||
|
Prepare a Google Drive folder in your $HOME directory with `grive -a`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 'google-drive' is the name of your Google Drive folder in your $HOME directory
|
||||||
|
systemctl --user enable grive@$(systemd-escape google-drive).service
|
||||||
|
systemctl --user start grive@$(systemd-escape google-drive).service
|
||||||
|
```
|
||||||
|
|
||||||
|
You can enable and start this unit for multiple folders in your `$HOME`
|
||||||
|
directory if you need to sync with multiple google accounts.
|
||||||
|
|
||||||
|
You can also only enable the time based syncing or the changes based syncing
|
||||||
|
by only directly enabling and starting the corresponding unit:
|
||||||
|
`grive-changes@$(systemd-escape google-drive).service` or
|
||||||
|
`grive-timer@$(systemd-escape google-drive).timer`.
|
||||||
|
|
||||||
|
### Shared files
|
||||||
|
|
||||||
|
Files and folders which are shared with you don't automatically show up in
|
||||||
|
your folder. They need to be added explicitly to your Google Drive: go to the
|
||||||
|
Google Drive website, right click on the file or folder and chose 'Add to My
|
||||||
|
Drive'.
|
||||||
|
|
||||||
|
### Different OAuth2 client to workaround over quota and google approval issues
|
||||||
|
|
||||||
|
Google recently started to restrict access for unapproved applications:
|
||||||
|
https://developers.google.com/drive/api/v3/about-auth?hl=ru
|
||||||
|
|
||||||
|
Grive2 is currently awaiting approval but it seems it will take forever.
|
||||||
|
Also even if they approve it the default Client ID supplied with grive may
|
||||||
|
exceed quota and grive will then fail to sync.
|
||||||
|
|
||||||
|
You can supply your own OAuth2 client credentials to work around these problems
|
||||||
|
by following these steps:
|
||||||
|
|
||||||
|
1. Go to https://console.developers.google.com/apis/api/drive.googleapis.com
|
||||||
|
2. Choose a project (you might need to create one first)
|
||||||
|
3. Go to https://console.developers.google.com/apis/library/drive.googleapis.com and
|
||||||
|
"Enable" the Google Drive APIs
|
||||||
|
4. Go to https://console.cloud.google.com/apis/credentials and click "Create credentials > Help me choose"
|
||||||
|
5. In the "Find out what credentials you need" dialog, choose:
|
||||||
|
- Which API are you using: "Google Drive API"
|
||||||
|
- Where will you be calling the API from: "Other UI (...CLI...)"
|
||||||
|
- What data will you be accessing: "User Data"
|
||||||
|
6. In the next steps create a client id (name doesn't matter) and
|
||||||
|
setup the consent screen (defaults are ok, no need for any URLs)
|
||||||
|
7. The needed "Client ID" and "Client Secret" are either in the shown download
|
||||||
|
or can later found by clicking on the created credential on
|
||||||
|
https://console.developers.google.com/apis/credentials/
|
||||||
|
8. When you change client ID/secret in an existing Grive folder you must first delete
|
||||||
|
the old `.grive` configuration file.
|
||||||
|
9. Call `grive -a --id <client_id> --secret <client_secret>` and follow the steps
|
||||||
|
to authenticate the OAuth2 client to allow it to access your drive folder.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
For the detailed instructions, see http://yourcmc.ru/wiki/Grive2#Installation
|
||||||
|
|
||||||
|
### Install dependencies
|
||||||
|
|
||||||
You need the following libraries:
|
You need the following libraries:
|
||||||
|
|
||||||
|
@ -39,13 +163,28 @@ There are also some optional dependencies:
|
||||||
On a Debian/Ubuntu/Linux Mint machine just run the following command to install all
|
On a Debian/Ubuntu/Linux Mint machine just run the following command to install all
|
||||||
these packages:
|
these packages:
|
||||||
|
|
||||||
sudo apt-get install git cmake build-essential libgcrypt11-dev libyajl-dev \
|
sudo apt-get install git cmake build-essential libgcrypt20-dev libyajl-dev \
|
||||||
libboost-all-dev libcurl4-openssl-dev libexpat1-dev libcppunit-dev binutils-dev
|
libboost-all-dev libcurl4-openssl-dev libexpat1-dev libcppunit-dev binutils-dev \
|
||||||
|
debhelper zlib1g-dev dpkg-dev pkg-config
|
||||||
|
|
||||||
|
Fedora:
|
||||||
|
|
||||||
|
sudo dnf install git cmake libgcrypt-devel gcc-c++ libstdc++ yajl-devel boost-devel libcurl-devel expat-devel binutils zlib
|
||||||
|
|
||||||
|
|
||||||
FreeBSD:
|
FreeBSD:
|
||||||
|
|
||||||
pkg install git cmake boost-libs yajl libgcrypt pkgconf cppunit libbfd
|
pkg install git cmake boost-libs yajl libgcrypt pkgconf cppunit libbfd
|
||||||
|
|
||||||
|
### Build Debian packages
|
||||||
|
|
||||||
|
On a Debian/Ubuntu/Linux Mint you can use `dpkg-buildpackage` utility from `dpkg-dev` package
|
||||||
|
to build grive. Just clone the repository, `cd` into it and run
|
||||||
|
|
||||||
|
dpkg-buildpackage -j4 --no-sign
|
||||||
|
|
||||||
|
### Manual build
|
||||||
|
|
||||||
Grive uses cmake to build. Basic install sequence is
|
Grive uses cmake to build. Basic install sequence is
|
||||||
|
|
||||||
mkdir build
|
mkdir build
|
||||||
|
@ -54,22 +193,45 @@ Grive uses cmake to build. Basic install sequence is
|
||||||
make -j4
|
make -j4
|
||||||
sudo make install
|
sudo make install
|
||||||
|
|
||||||
For the detailed instructions, see http://yourcmc.ru/wiki/Grive2#Installation
|
Alternativly you can define your own client_id and client_secret during build
|
||||||
|
|
||||||
When Grive is run for the first time, you should use the "-a" argument to grant
|
mkdir build
|
||||||
permission to Grive to access to your Google Drive. A URL should be printed.
|
cd build
|
||||||
Go to the link. You will need to login to your Google account if you haven't
|
cmake .. "-DAPP_ID:STRING=<client_id>" "-DAPP_SECRET:STRING=<client_secret>"
|
||||||
done so. After granting the permission to Grive, the browser will show you
|
make -j4
|
||||||
an authenication code. Copy-and-paste that to the standard input of Grive.
|
sudo make install
|
||||||
|
|
||||||
If everything works fine, Grive will create .grive and .grive_state files in your
|
|
||||||
current directory. It will also start downloading files from your Google Drive to
|
|
||||||
your current directory.
|
|
||||||
|
|
||||||
Enjoy!
|
|
||||||
|
|
||||||
## Version History
|
## Version History
|
||||||
|
|
||||||
|
### Grive2 v0.5.3
|
||||||
|
|
||||||
|
- Implement Google OAuth loopback IP redirect flow
|
||||||
|
- Various small fixes
|
||||||
|
|
||||||
|
### Grive2 v0.5.1
|
||||||
|
|
||||||
|
- Support for .griveignore
|
||||||
|
- Automatic sync solution based on inotify-tools and systemd
|
||||||
|
- no-remote-new and upload-only modes
|
||||||
|
- Ignore regexp does not persist anymore (note that Grive will still track it to not
|
||||||
|
accidentally delete remote files when changing ignore regexp)
|
||||||
|
- Added options to limit upload and download speed
|
||||||
|
- Faster upload of new and changed files. Now Grive uploads files without first calculating
|
||||||
|
md5 checksum when file is created locally or when its size changes.
|
||||||
|
- Added -P/--progress-bar option to print ASCII progress bar for each processed file (pull request by @svartkanin)
|
||||||
|
- Added command-line options to specify your own client_id and client_secret
|
||||||
|
- Now grive2 skips links, sockets, fifos and other unusual files
|
||||||
|
- Various small build fixes
|
||||||
|
|
||||||
|
### Grive2 v0.5
|
||||||
|
|
||||||
|
- Much faster and more correct synchronisation using local modification time and checksum cache (similar to git index)
|
||||||
|
- Automatic move/rename detection, -m option removed
|
||||||
|
- force option works again
|
||||||
|
- Instead of crashing on sync exceptions Grive will give a warning and attempt to sync failed files again during the next run.
|
||||||
|
- Revision support works again. Grive 0.4.x always created new revisions for all files during sync, regardless of the absence of the --new-rev option.
|
||||||
|
- Shared files now sync correctly
|
||||||
|
|
||||||
### Grive2 v0.4.2
|
### Grive2 v0.4.2
|
||||||
|
|
||||||
- Option to exclude files by perl regexp
|
- Option to exclude files by perl regexp
|
||||||
|
@ -89,7 +251,7 @@ Known issues:
|
||||||
|
|
||||||
First fork release, by Vitaliy Filippov / vitalif at mail*ru
|
First fork release, by Vitaliy Filippov / vitalif at mail*ru
|
||||||
- Support for the new Google Drive REST API (old "Document List" API is shut down by Google 20 April 2015)
|
- Support for the new Google Drive REST API (old "Document List" API is shut down by Google 20 April 2015)
|
||||||
- REAL support for partial sync: syncs only one subdirectory with `grive -d subdir`
|
- REAL support for partial sync: syncs only one subdirectory with `grive -s subdir`
|
||||||
- Major refactoring - a lot of dead code removed, JSON-C is not used anymore, API-specific code is split from non-API-specific
|
- Major refactoring - a lot of dead code removed, JSON-C is not used anymore, API-specific code is split from non-API-specific
|
||||||
- Some stability fixes from Visa Putkinen https://github.com/visap/grive/commits/visa
|
- Some stability fixes from Visa Putkinen https://github.com/visap/grive/commits/visa
|
||||||
- Slightly reduce number of syscalls when reading local files.
|
- Slightly reduce number of syscalls when reading local files.
|
||||||
|
@ -105,3 +267,4 @@ New features:
|
||||||
- #87: support for revisions
|
- #87: support for revisions
|
||||||
- #86: ~~partial sync (contributed by justin at tierramedia.com)~~ that's not partial sync,
|
- #86: ~~partial sync (contributed by justin at tierramedia.com)~~ that's not partial sync,
|
||||||
that's only support for specifying local path on command line
|
that's only support for specifying local path on command line
|
||||||
|
|
||||||
|
|
|
@ -1,12 +1,6 @@
|
||||||
find_library( DL_LIBRARY NAMES dl PATH ${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES} )
|
|
||||||
find_library( BFD_LIBRARY NAMES bfd PATH ${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES} )
|
find_library( BFD_LIBRARY NAMES bfd PATH ${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES} )
|
||||||
|
|
||||||
if ( DL_LIBRARY AND BFD_LIBRARY )
|
if ( BFD_LIBRARY )
|
||||||
set( BFD_FOUND TRUE )
|
set( BFD_FOUND TRUE )
|
||||||
endif (DL_LIBRARY AND BFD_LIBRARY)
|
|
||||||
|
|
||||||
if ( BFD_FOUND )
|
|
||||||
|
|
||||||
message( STATUS "Found libbfd: ${BFD_LIBRARY}")
|
message( STATUS "Found libbfd: ${BFD_LIBRARY}")
|
||||||
|
endif ( BFD_LIBRARY )
|
||||||
endif ( BFD_FOUND )
|
|
||||||
|
|
|
@ -27,6 +27,9 @@ IF(LIBGCRYPTCONFIG_EXECUTABLE)
|
||||||
|
|
||||||
EXEC_PROGRAM(${LIBGCRYPTCONFIG_EXECUTABLE} ARGS --cflags RETURN_VALUE _return_VALUE OUTPUT_VARIABLE LIBGCRYPT_CFLAGS)
|
EXEC_PROGRAM(${LIBGCRYPTCONFIG_EXECUTABLE} ARGS --cflags RETURN_VALUE _return_VALUE OUTPUT_VARIABLE LIBGCRYPT_CFLAGS)
|
||||||
|
|
||||||
|
string(REPLACE "fgrep: warning: fgrep is obsolescent; using grep -F" "" LIBGCRYPT_LIBRARIES "${LIBGCRYPT_LIBRARIES}")
|
||||||
|
string(STRIP "${LIBGCRYPT_LIBRARIES}" LIBGCRYPT_LIBRARIES)
|
||||||
|
|
||||||
IF(${LIBGCRYPT_CFLAGS} MATCHES "\n")
|
IF(${LIBGCRYPT_CFLAGS} MATCHES "\n")
|
||||||
SET(LIBGCRYPT_CFLAGS " ")
|
SET(LIBGCRYPT_CFLAGS " ")
|
||||||
ENDIF(${LIBGCRYPT_CFLAGS} MATCHES "\n")
|
ENDIF(${LIBGCRYPT_CFLAGS} MATCHES "\n")
|
||||||
|
|
|
@ -0,0 +1,63 @@
|
||||||
|
#compdef grive
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Copyright (c) 2015 Github zsh-users - http://github.com/zsh-users
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are met:
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in the
|
||||||
|
# documentation and/or other materials provided with the distribution.
|
||||||
|
# * Neither the name of the zsh-users nor the
|
||||||
|
# names of its contributors may be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
# DISCLAIMED. IN NO EVENT SHALL ZSH-USERS BE LIABLE FOR ANY
|
||||||
|
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Description
|
||||||
|
# -----------
|
||||||
|
#
|
||||||
|
# Completion script for Grive (https://github.com/vitalif/grive2)
|
||||||
|
#
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Authors
|
||||||
|
# -------
|
||||||
|
#
|
||||||
|
# * Doron Behar <https://github.com/doronbehar>
|
||||||
|
#
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# -*- mode: zsh; sh-indentation: 2; indent-tabs-mode: nil; sh-basic-offset: 2; -*-
|
||||||
|
# vim: ft=zsh sw=2 ts=2 et
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
local curcontext="$curcontext" state line ret=1
|
||||||
|
typeset -A opt_args
|
||||||
|
|
||||||
|
_arguments -C \
|
||||||
|
'(-h --help)'{-h,--help}'[Produce help message]' \
|
||||||
|
'(-v --version)'{-v,--version}'[Display Grive version]' \
|
||||||
|
'(-a --auth)'{-a,--auth}'[Request authorization token]' \
|
||||||
|
'(-p --path)'{-p,--path}'[Root directory to sync]' \
|
||||||
|
'(-s --dir)'{-s,--dir}'[Single subdirectory to sync (remembered for next runs)]' \
|
||||||
|
'(-V --verbose)'{-V,--verbose}'[Verbose mode. Enable more messages than normal.]' \
|
||||||
|
'(--log-http)--log-http[Log all HTTP responses in this file for debugging.]' \
|
||||||
|
'(--new-rev)--new-rev[Create,new revisions in server for updated files.]' \
|
||||||
|
'(-d --debug)'{-d,--debug}'[Enable debug level messages. Implies -v.]' \
|
||||||
|
'(-l --log)'{-l,--log}'[Set log output filename.]' \
|
||||||
|
'(-f --force)'{-f,--force}'[Force grive to always download a file from Google Drive instead of uploading it.]' \
|
||||||
|
'(--dry-run)--dry-run[Only,detect which files need to be uploaded/downloaded,without actually performing them.]' \
|
||||||
|
'(--ignore)--ignore[Perl,RegExp to ignore files (matched against relative paths, remembered for next runs) ]' \
|
||||||
|
'*: :_files' && ret=0
|
||||||
|
|
||||||
|
return ret
|
|
@ -1,3 +1,24 @@
|
||||||
|
grive2 (0.5.3) unstable; urgency=medium
|
||||||
|
|
||||||
|
* Implement Google OAuth loopback IP redirect flow
|
||||||
|
* Various small fixes
|
||||||
|
|
||||||
|
-- Vitaliy Filippov <vitalif@yourcmc.ru> Wed, 09 Nov 2022 12:42:28 +0300
|
||||||
|
|
||||||
|
grive2 (0.5.2+git20210315) unstable; urgency=medium
|
||||||
|
|
||||||
|
* Newer dev version
|
||||||
|
* Add systemd unit files and helper script for automatic syncs
|
||||||
|
* Add possibility to change client id and secret and save it between runs
|
||||||
|
|
||||||
|
-- Vitaliy Filippov <vitalif@yourcmc.ru> Wed, 31 Jul 2016 22:04:53 +0300
|
||||||
|
|
||||||
|
grive2 (0.5+git20160114) unstable; urgency=medium
|
||||||
|
|
||||||
|
* Newer release, with support for faster sync and rename detection
|
||||||
|
|
||||||
|
-- Vitaliy Filippov <vitalif@yourcmc.ru> Sun, 03 Jan 2016 12:51:55 +0300
|
||||||
|
|
||||||
grive2 (0.4.1+git20151011) unstable; urgency=medium
|
grive2 (0.4.1+git20151011) unstable; urgency=medium
|
||||||
|
|
||||||
* Add Debian packaging scripts to the official repository
|
* Add Debian packaging scripts to the official repository
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
7
|
11
|
||||||
|
|
|
@ -2,7 +2,7 @@ Source: grive2
|
||||||
Section: net
|
Section: net
|
||||||
Priority: optional
|
Priority: optional
|
||||||
Maintainer: Vitaliy Filippov <vitalif@mail.ru>
|
Maintainer: Vitaliy Filippov <vitalif@mail.ru>
|
||||||
Build-Depends: debhelper, cmake, pkg-config, zlib1g-dev, libcurl4-openssl-dev, libstdc++6-4.4-dev | libstdc++-4.9-dev | libstdc++-5-dev, libboost-filesystem-dev, libboost-program-options-dev, libboost-test-dev, libboost-regex-dev, libexpat1-dev, binutils-dev, libgcrypt-dev, libyajl-dev
|
Build-Depends: debhelper, cmake, pkg-config, zlib1g-dev, libcurl4-openssl-dev | libcurl4-gnutls-dev, libboost-filesystem-dev, libboost-program-options-dev, libboost-test-dev, libboost-regex-dev, libexpat1-dev, libgcrypt-dev, libyajl-dev
|
||||||
Standards-Version: 3.9.6
|
Standards-Version: 3.9.6
|
||||||
Homepage: https://yourcmc.ru/wiki/Grive2
|
Homepage: https://yourcmc.ru/wiki/Grive2
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,7 @@
|
||||||
#!/usr/bin/make -f
|
#!/usr/bin/make -f
|
||||||
|
|
||||||
|
override_dh_auto_configure:
|
||||||
|
dh_auto_configure -- -DHAVE_SYSTEMD=1
|
||||||
|
|
||||||
%:
|
%:
|
||||||
dh $@ --buildsystem=cmake --parallel
|
dh $@ --buildsystem=cmake --parallel --builddirectory=build
|
||||||
|
|
|
@ -17,13 +17,28 @@ add_executable( grive_executable
|
||||||
)
|
)
|
||||||
|
|
||||||
target_link_libraries( grive_executable
|
target_link_libraries( grive_executable
|
||||||
${Boost_LIBRARIES}
|
|
||||||
grive
|
grive
|
||||||
)
|
)
|
||||||
|
|
||||||
|
set(DEFAULT_APP_ID "615557989097-i93d4d1ojpen0m0dso18ldr6orjkidgf.apps.googleusercontent.com")
|
||||||
|
set(DEFAULT_APP_SECRET "xiM8Apu_WuRRdheNelJcNtOD")
|
||||||
|
set(APP_ID ${DEFAULT_APP_ID} CACHE STRING "Application Id")
|
||||||
|
set(APP_SECRET ${DEFAULT_APP_SECRET} CACHE STRING "Application Secret")
|
||||||
|
|
||||||
|
target_compile_definitions ( grive_executable
|
||||||
|
PRIVATE
|
||||||
|
-DAPP_ID="${APP_ID}"
|
||||||
|
-DAPP_SECRET="${APP_SECRET}"
|
||||||
|
)
|
||||||
|
|
||||||
set_target_properties( grive_executable
|
set_target_properties( grive_executable
|
||||||
PROPERTIES OUTPUT_NAME grive
|
PROPERTIES OUTPUT_NAME grive
|
||||||
)
|
)
|
||||||
|
|
||||||
install(TARGETS grive_executable RUNTIME DESTINATION bin)
|
install(TARGETS grive_executable RUNTIME DESTINATION bin)
|
||||||
|
|
||||||
|
if ( ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD" OR ${CMAKE_SYSTEM_NAME} MATCHES "OpenBSD" )
|
||||||
|
install(FILES doc/grive.1 DESTINATION man/man1 )
|
||||||
|
else ( ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD" OR ${CMAKE_SYSTEM_NAME} MATCHES "OpenBSD" )
|
||||||
install(FILES doc/grive.1 DESTINATION share/man/man1 )
|
install(FILES doc/grive.1 DESTINATION share/man/man1 )
|
||||||
|
endif( ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD" OR ${CMAKE_SYSTEM_NAME} MATCHES "OpenBSD" )
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
.\" First parameter, NAME, should be all caps
|
.\" First parameter, NAME, should be all caps
|
||||||
.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
|
.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
|
||||||
.\" other parameters are allowed: see man(7), man(1)
|
.\" other parameters are allowed: see man(7), man(1)
|
||||||
.TH "GRIVE" 1 "June 19, 2012"
|
.TH "GRIVE" 1 "January 3, 2016"
|
||||||
.SH NAME
|
.SH NAME
|
||||||
grive \- Google Drive client for GNU/Linux
|
grive \- Google Drive client for GNU/Linux
|
||||||
|
|
||||||
|
@ -26,33 +26,102 @@ Requests authorization token from Google
|
||||||
Enable debug level messages. Implies \-V
|
Enable debug level messages. Implies \-V
|
||||||
.TP
|
.TP
|
||||||
\fB\-\-dry-run\fR
|
\fB\-\-dry-run\fR
|
||||||
Only detects which files are needed for download or upload without doing it
|
Only detect which files need to be uploaded/downloaded, without actually performing changes
|
||||||
.TP
|
.TP
|
||||||
\fB\-f, \-\-force\fR
|
\fB\-f, \-\-force\fR
|
||||||
Forces
|
Forces
|
||||||
.I grive
|
.I grive
|
||||||
to always download a file from Google Drive instead uploading it
|
to always download a file from Google Drive instead uploading it
|
||||||
.TP
|
.TP
|
||||||
|
\fB\-u, \-\-upload\-only\fR
|
||||||
|
Forces
|
||||||
|
.I grive
|
||||||
|
to not download anything from Google Drive and only upload local changes to server instead
|
||||||
|
.TP
|
||||||
|
\fB\-n, \-\-no\-remote\-new\fR
|
||||||
|
Forces
|
||||||
|
.I grive
|
||||||
|
to download only files that are changed in Google Drive and already exist locally
|
||||||
|
.TP
|
||||||
\fB\-h\fR, \fB\-\-help\fR
|
\fB\-h\fR, \fB\-\-help\fR
|
||||||
Produces help message
|
Produces help message
|
||||||
.TP
|
.TP
|
||||||
\fB\-l\fR filename, \fB\-\-log\fR filename
|
\fB\-\-ignore\fR <perl_regexp>
|
||||||
Set log output to
|
Ignore files with relative paths matching this Perl Regular Expression.
|
||||||
.I filename
|
.TP
|
||||||
|
\fB\-l\fR <filename>, \fB\-\-log\fR <filename>
|
||||||
|
Write log output to
|
||||||
|
.I <filename>
|
||||||
|
.TP
|
||||||
|
\fB\-\-log\-http\fR <filename_prefix>
|
||||||
|
Log all HTTP responses in files named
|
||||||
|
.I <filename_prefix>YYYY-MM-DD.HHMMSS.txt
|
||||||
|
for debugging
|
||||||
|
.TP
|
||||||
|
\fB\-\-new\-rev\fR
|
||||||
|
Create new revisions in server for updated files
|
||||||
|
.TP
|
||||||
|
\fB\-p\fR <wc_path>, \fB\-\-path\fR <wc_path>
|
||||||
|
Use
|
||||||
|
.I <wc_path>
|
||||||
|
as the working copy root directory
|
||||||
|
.TP
|
||||||
|
\fB\-s\fR <subdir>, \fB\-\-dir\fR <subdir>
|
||||||
|
Sync a single
|
||||||
|
.I <subdir>
|
||||||
|
subdirectory. Internally converted to an ignore regexp.
|
||||||
.TP
|
.TP
|
||||||
\fB\-v\fR, \fB\-\-version\fR
|
\fB\-v\fR, \fB\-\-version\fR
|
||||||
Displays program version
|
Displays program version
|
||||||
.TP
|
.TP
|
||||||
|
\fB\-P\fR, \fB\-\-progress-bar\fR
|
||||||
|
Print ASCII progress bar for each downloaded/uploaded file.
|
||||||
|
.TP
|
||||||
\fB\-V\fR, \fB\-\-verbose\fR
|
\fB\-V\fR, \fB\-\-verbose\fR
|
||||||
Verbose mode. Enables more messages than usual.
|
Verbose mode. Enables more messages than usual.
|
||||||
|
|
||||||
.SH AUTHOR
|
.SH .griveignore
|
||||||
.PP
|
.PP
|
||||||
The software was developed by Nestal Wan.
|
You may create .griveignore in your Grive root and use it to setup
|
||||||
|
exclusion/inclusion rules.
|
||||||
.PP
|
.PP
|
||||||
|
Rules are similar to Git's .gitignore, but may differ slightly due to the different
|
||||||
|
implementation.
|
||||||
|
.IP \[bu]
|
||||||
|
lines that start with # are comments
|
||||||
|
.IP \[bu]
|
||||||
|
leading and trailing spaces ignored unless escaped with \\
|
||||||
|
.IP \[bu]
|
||||||
|
non-empty lines without ! in front are treated as "exclude" patterns
|
||||||
|
.IP \[bu]
|
||||||
|
non-empty lines with ! in front are treated as "include" patterns
|
||||||
|
and have a priority over all "exclude" ones
|
||||||
|
.IP \[bu]
|
||||||
|
patterns are matched against the filenames relative to the grive root
|
||||||
|
.IP \[bu]
|
||||||
|
a/**/b matches any number of subpaths between a and b, including 0
|
||||||
|
.IP \[bu]
|
||||||
|
**/a matches `a` inside any directory
|
||||||
|
.IP \[bu]
|
||||||
|
b/** matches everything inside `b`, but not b itself
|
||||||
|
.IP \[bu]
|
||||||
|
* matches any number of any characters except /
|
||||||
|
.IP \[bu]
|
||||||
|
? matches any character except /
|
||||||
|
.IP \[bu]
|
||||||
|
\[char46]griveignore itself isn't ignored by default, but you can include it in itself to ignore
|
||||||
|
|
||||||
|
.SH AUTHORS
|
||||||
|
.PP
|
||||||
|
Current maintainer is Vitaliy Filippov.
|
||||||
|
.PP
|
||||||
|
Original author was Nestal Wan.
|
||||||
This manpage was written by José Luis Segura Lucas (josel.segura@gmx.es)
|
This manpage was written by José Luis Segura Lucas (josel.segura@gmx.es)
|
||||||
|
.PP
|
||||||
|
The full list of contributors may be found here
|
||||||
|
.I http://yourcmc.ru/wiki/Grive2#Full_list_of_contributors
|
||||||
|
|
||||||
.SH REPORT BUGS
|
.SH REPORT BUGS
|
||||||
.PP
|
.PP
|
||||||
.I https://github.com/Grive/grive
|
.I https://github.com/vitalif/grive2/issues
|
||||||
.I https://groups.google.com/forum/?fromgroups#!forum/grive-devel
|
.I https://groups.google.com/forum/?fromgroups#!forum/grive-devel
|
||||||
|
|
|
@ -18,6 +18,7 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "util/Config.hh"
|
#include "util/Config.hh"
|
||||||
|
#include "util/ProgressBar.hh"
|
||||||
|
|
||||||
#include "base/Drive.hh"
|
#include "base/Drive.hh"
|
||||||
#include "drive2/Syncer2.hh"
|
#include "drive2/Syncer2.hh"
|
||||||
|
@ -45,8 +46,8 @@
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
|
|
||||||
const std::string client_id = "22314510474.apps.googleusercontent.com" ;
|
const std::string default_id = APP_ID ;
|
||||||
const std::string client_secret = "bl4ufi89h-9MkFlypcI7R785" ;
|
const std::string default_secret = APP_SECRET ;
|
||||||
|
|
||||||
using namespace gr ;
|
using namespace gr ;
|
||||||
namespace po = boost::program_options;
|
namespace po = boost::program_options;
|
||||||
|
@ -66,12 +67,13 @@ void InitGCrypt()
|
||||||
|
|
||||||
void InitLog( const po::variables_map& vm )
|
void InitLog( const po::variables_map& vm )
|
||||||
{
|
{
|
||||||
std::auto_ptr<log::CompositeLog> comp_log(new log::CompositeLog) ;
|
std::unique_ptr<log::CompositeLog> comp_log( new log::CompositeLog ) ;
|
||||||
LogBase* console_log = comp_log->Add( std::auto_ptr<LogBase>( new log::DefaultLog ) ) ;
|
std::unique_ptr<LogBase> def_log( new log::DefaultLog );
|
||||||
|
LogBase* console_log = comp_log->Add( def_log ) ;
|
||||||
|
|
||||||
if ( vm.count( "log" ) )
|
if ( vm.count( "log" ) )
|
||||||
{
|
{
|
||||||
std::auto_ptr<LogBase> file_log(new log::DefaultLog( vm["log"].as<std::string>() )) ;
|
std::unique_ptr<LogBase> file_log( new log::DefaultLog( vm["log"].as<std::string>() ) ) ;
|
||||||
file_log->Enable( log::debug ) ;
|
file_log->Enable( log::debug ) ;
|
||||||
file_log->Enable( log::verbose ) ;
|
file_log->Enable( log::verbose ) ;
|
||||||
file_log->Enable( log::info ) ;
|
file_log->Enable( log::info ) ;
|
||||||
|
@ -96,7 +98,7 @@ void InitLog( const po::variables_map& vm )
|
||||||
console_log->Enable( log::verbose ) ;
|
console_log->Enable( log::verbose ) ;
|
||||||
console_log->Enable( log::debug ) ;
|
console_log->Enable( log::debug ) ;
|
||||||
}
|
}
|
||||||
LogBase::Inst( std::auto_ptr<LogBase>(comp_log.release()) ) ;
|
LogBase::Inst( comp_log.release() ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
int Main( int argc, char **argv )
|
int Main( int argc, char **argv )
|
||||||
|
@ -109,8 +111,11 @@ int Main( int argc, char **argv )
|
||||||
( "help,h", "Produce help message" )
|
( "help,h", "Produce help message" )
|
||||||
( "version,v", "Display Grive version" )
|
( "version,v", "Display Grive version" )
|
||||||
( "auth,a", "Request authorization token" )
|
( "auth,a", "Request authorization token" )
|
||||||
( "path,p", po::value<std::string>(), "Root directory to sync")
|
( "id,i", po::value<std::string>(), "Authentication ID")
|
||||||
( "dir,s", po::value<std::string>(), "Single subdirectory to sync (remembered for next runs)")
|
( "secret,e", po::value<std::string>(), "Authentication secret")
|
||||||
|
( "print-url", "Only print url for request")
|
||||||
|
( "path,p", po::value<std::string>(), "Path to working copy root")
|
||||||
|
( "dir,s", po::value<std::string>(), "Single subdirectory to sync")
|
||||||
( "verbose,V", "Verbose mode. Enable more messages than normal.")
|
( "verbose,V", "Verbose mode. Enable more messages than normal.")
|
||||||
( "log-http", po::value<std::string>(), "Log all HTTP responses in this file for debugging.")
|
( "log-http", po::value<std::string>(), "Log all HTTP responses in this file for debugging.")
|
||||||
( "new-rev", "Create new revisions in server for updated files.")
|
( "new-rev", "Create new revisions in server for updated files.")
|
||||||
|
@ -118,14 +123,25 @@ int Main( int argc, char **argv )
|
||||||
( "log,l", po::value<std::string>(), "Set log output filename." )
|
( "log,l", po::value<std::string>(), "Set log output filename." )
|
||||||
( "force,f", "Force grive to always download a file from Google Drive "
|
( "force,f", "Force grive to always download a file from Google Drive "
|
||||||
"instead of uploading it." )
|
"instead of uploading it." )
|
||||||
|
( "upload-only,u", "Do not download anything from Google Drive, only upload local changes" )
|
||||||
|
( "no-remote-new,n", "Download only files that are changed in Google Drive and already exist locally" )
|
||||||
( "dry-run", "Only detect which files need to be uploaded/downloaded, "
|
( "dry-run", "Only detect which files need to be uploaded/downloaded, "
|
||||||
"without actually performing them." )
|
"without actually performing them." )
|
||||||
( "ignore", po::value<std::string>(), "Perl RegExp to ignore files (matched against relative paths, remembered for next runs)." )
|
( "upload-speed,U", po::value<unsigned>(), "Limit upload speed in kbytes per second" )
|
||||||
( "move,m", po::value<std::vector<std::string> >()->multitoken(), "Syncs, then moves a file (first argument) to new location (second argument) without reuploading or redownloading." )
|
( "download-speed,D", po::value<unsigned>(), "Limit download speed in kbytes per second" )
|
||||||
|
( "progress-bar,P", "Enable progress bar for upload/download of files")
|
||||||
;
|
;
|
||||||
|
|
||||||
po::variables_map vm;
|
po::variables_map vm;
|
||||||
|
try
|
||||||
|
{
|
||||||
po::store( po::parse_command_line( argc, argv, desc ), vm );
|
po::store( po::parse_command_line( argc, argv, desc ), vm );
|
||||||
|
}
|
||||||
|
catch( po::error &e )
|
||||||
|
{
|
||||||
|
std::cerr << "Options are incorrect. Use -h for help\n";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
po::notify( vm );
|
po::notify( vm );
|
||||||
|
|
||||||
// simple commands that doesn't require log or config
|
// simple commands that doesn't require log or config
|
||||||
|
@ -148,37 +164,61 @@ int Main( int argc, char **argv )
|
||||||
|
|
||||||
Log( "config file name %1%", config.Filename(), log::verbose );
|
Log( "config file name %1%", config.Filename(), log::verbose );
|
||||||
|
|
||||||
std::auto_ptr<http::Agent> http( new http::CurlAgent );
|
std::unique_ptr<http::Agent> http( new http::CurlAgent );
|
||||||
if ( vm.count( "log-http" ) )
|
if ( vm.count( "log-http" ) )
|
||||||
http->SetLog( new http::ResponseLog( vm["log-http"].as<std::string>(), ".txt" ) );
|
http->SetLog( new http::ResponseLog( vm["log-http"].as<std::string>(), ".txt" ) );
|
||||||
|
|
||||||
|
std::unique_ptr<ProgressBar> pb;
|
||||||
|
if ( vm.count( "progress-bar" ) )
|
||||||
|
{
|
||||||
|
pb.reset( new ProgressBar() );
|
||||||
|
http->SetProgressReporter( pb.get() );
|
||||||
|
}
|
||||||
|
|
||||||
if ( vm.count( "auth" ) )
|
if ( vm.count( "auth" ) )
|
||||||
{
|
{
|
||||||
OAuth2 token( http.get(), client_id, client_secret ) ;
|
std::string id = vm.count( "id" ) > 0
|
||||||
|
? vm["id"].as<std::string>()
|
||||||
|
: default_id ;
|
||||||
|
std::string secret = vm.count( "secret" ) > 0
|
||||||
|
? vm["secret"].as<std::string>()
|
||||||
|
: default_secret ;
|
||||||
|
|
||||||
|
OAuth2 token( http.get(), id, secret ) ;
|
||||||
|
|
||||||
|
if ( vm.count("print-url") )
|
||||||
|
{
|
||||||
|
std::cout << token.MakeAuthURL() << std::endl ;
|
||||||
|
return 0 ;
|
||||||
|
}
|
||||||
|
|
||||||
std::cout
|
std::cout
|
||||||
<< "-----------------------\n"
|
<< "-----------------------\n"
|
||||||
<< "Please go to this URL and get an authentication code:\n\n"
|
<< "Please open this URL in your browser to authenticate Grive2:\n\n"
|
||||||
<< token.MakeAuthURL()
|
<< token.MakeAuthURL()
|
||||||
<< std::endl ;
|
<< std::endl ;
|
||||||
|
|
||||||
std::cout
|
if ( !token.GetCode() )
|
||||||
<< "\n-----------------------\n"
|
{
|
||||||
<< "Please input the authentication code here: " << std::endl ;
|
std::cout << "Authentication failed\n";
|
||||||
std::string code ;
|
return -1;
|
||||||
std::cin >> code ;
|
}
|
||||||
|
|
||||||
token.Auth( code ) ;
|
|
||||||
|
|
||||||
// save to config
|
// save to config
|
||||||
|
config.Set( "id", Val( id ) ) ;
|
||||||
|
config.Set( "secret", Val( secret ) ) ;
|
||||||
config.Set( "refresh_token", Val( token.RefreshToken() ) ) ;
|
config.Set( "refresh_token", Val( token.RefreshToken() ) ) ;
|
||||||
config.Save() ;
|
config.Save() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string refresh_token ;
|
std::string refresh_token ;
|
||||||
|
std::string id ;
|
||||||
|
std::string secret ;
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
refresh_token = config.Get("refresh_token").Str() ;
|
refresh_token = config.Get("refresh_token").Str() ;
|
||||||
|
id = config.Get("id").Str() ;
|
||||||
|
secret = config.Get("secret").Str() ;
|
||||||
}
|
}
|
||||||
catch ( Exception& e )
|
catch ( Exception& e )
|
||||||
{
|
{
|
||||||
|
@ -190,36 +230,32 @@ int Main( int argc, char **argv )
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
OAuth2 token( http.get(), refresh_token, client_id, client_secret ) ;
|
OAuth2 token( http.get(), refresh_token, id, secret ) ;
|
||||||
AuthAgent agent( token, http.get() ) ;
|
AuthAgent agent( token, http.get() ) ;
|
||||||
v2::Syncer2 syncer( &agent );
|
v2::Syncer2 syncer( &agent );
|
||||||
|
|
||||||
|
if ( vm.count( "upload-speed" ) > 0 )
|
||||||
|
agent.SetUploadSpeed( vm["upload-speed"].as<unsigned>() * 1000 );
|
||||||
|
if ( vm.count( "download-speed" ) > 0 )
|
||||||
|
agent.SetDownloadSpeed( vm["download-speed"].as<unsigned>() * 1000 );
|
||||||
|
|
||||||
Drive drive( &syncer, config.GetAll() ) ;
|
Drive drive( &syncer, config.GetAll() ) ;
|
||||||
drive.DetectChanges() ;
|
drive.DetectChanges() ;
|
||||||
|
|
||||||
if ( vm.count( "dry-run" ) == 0 )
|
if ( vm.count( "dry-run" ) == 0 )
|
||||||
{
|
{
|
||||||
|
// The progress bar should just be enabled when actual file transfers take place
|
||||||
|
if ( pb )
|
||||||
|
pb->setShowProgressBar( true ) ;
|
||||||
drive.Update() ;
|
drive.Update() ;
|
||||||
|
if ( pb )
|
||||||
|
pb->setShowProgressBar( false ) ;
|
||||||
|
|
||||||
drive.SaveState() ;
|
drive.SaveState() ;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
drive.DryRun() ;
|
drive.DryRun() ;
|
||||||
|
|
||||||
if ( vm.count ( "move" ) > 0 && vm.count( "dry-run" ) == 0 )
|
|
||||||
{
|
|
||||||
if (vm["move"].as<std::vector<std::string> >().size() < 2 )
|
|
||||||
Log( "Not enough arguments for move. Move failed.", log::error );
|
|
||||||
else
|
|
||||||
{
|
|
||||||
bool success = drive.Move( vm["move"].as<std::vector<std::string> >()[0],
|
|
||||||
vm["move"].as<std::vector<std::string> >()[1] );
|
|
||||||
if (success)
|
|
||||||
Log( "Move successful!", log::info );
|
|
||||||
else
|
|
||||||
Log( "Move failed.", log::error);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
config.Save() ;
|
config.Save() ;
|
||||||
Log( "Finished!", log::info ) ;
|
Log( "Finished!", log::info ) ;
|
||||||
return 0 ;
|
return 0 ;
|
||||||
|
|
|
@ -4,30 +4,31 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
|
||||||
|
|
||||||
find_package(LibGcrypt REQUIRED)
|
find_package(LibGcrypt REQUIRED)
|
||||||
find_package(CURL REQUIRED)
|
find_package(CURL REQUIRED)
|
||||||
find_package(EXPAT REQUIRED)
|
find_package(Backtrace)
|
||||||
find_package(Boost 1.40.0 COMPONENTS program_options filesystem unit_test_framework regex system REQUIRED)
|
find_package(Boost 1.40.0 COMPONENTS program_options filesystem unit_test_framework regex system REQUIRED)
|
||||||
find_package(BFD)
|
find_package(BFD)
|
||||||
find_package(CppUnit)
|
find_package(CppUnit)
|
||||||
find_package(Iberty)
|
find_package(Iberty)
|
||||||
find_package(ZLIB)
|
|
||||||
|
|
||||||
find_package(PkgConfig)
|
find_package(PkgConfig)
|
||||||
pkg_check_modules(YAJL REQUIRED yajl)
|
pkg_check_modules(YAJL REQUIRED yajl)
|
||||||
|
|
||||||
|
add_definitions(-Wall)
|
||||||
|
|
||||||
# additional headers if build unit tests
|
# additional headers if build unit tests
|
||||||
IF ( CPPUNIT_FOUND )
|
IF ( CPPUNIT_FOUND )
|
||||||
set( OPT_INCS ${CPPUNIT_INCLUDE_DIR} )
|
set( OPT_INCS ${CPPUNIT_INCLUDE_DIR} )
|
||||||
ENDIF ( CPPUNIT_FOUND )
|
ENDIF ( CPPUNIT_FOUND )
|
||||||
|
|
||||||
# build bfd classes if libbfd is found
|
# build bfd classes if libbfd and the backtrace library is found
|
||||||
if ( BFD_FOUND )
|
if ( BFD_FOUND AND Backtrace_FOUND )
|
||||||
set( OPT_LIBS ${DL_LIBRARY} ${BFD_LIBRARY} )
|
set( OPT_LIBS ${BFD_LIBRARY} ${Backtrace_LIBRARY} )
|
||||||
file( GLOB OPT_SRC
|
file( GLOB OPT_SRC
|
||||||
src/bfd/*.cc
|
src/bfd/*.cc
|
||||||
)
|
)
|
||||||
add_definitions( -DHAVE_BFD )
|
add_definitions( -DHAVE_BFD )
|
||||||
|
|
||||||
endif ( BFD_FOUND )
|
endif ( BFD_FOUND AND Backtrace_FOUND )
|
||||||
|
|
||||||
if ( IBERTY_FOUND )
|
if ( IBERTY_FOUND )
|
||||||
set( OPT_LIBS ${OPT_LIBS} ${IBERTY_LIBRARY} )
|
set( OPT_LIBS ${OPT_LIBS} ${IBERTY_LIBRARY} )
|
||||||
|
@ -35,21 +36,14 @@ else ( IBERTY_FOUND )
|
||||||
set( IBERTY_LIBRARY "" )
|
set( IBERTY_LIBRARY "" )
|
||||||
endif ( IBERTY_FOUND )
|
endif ( IBERTY_FOUND )
|
||||||
|
|
||||||
if ( ZLIB_FOUND )
|
|
||||||
set( OPT_LIBS ${OPT_LIBS} ${ZLIB_LIBRARIES} )
|
|
||||||
endif ( ZLIB_FOUND )
|
|
||||||
|
|
||||||
include_directories(
|
include_directories(
|
||||||
${libgrive_SOURCE_DIR}/src
|
${libgrive_SOURCE_DIR}/src
|
||||||
${libgrive_SOURCE_DIR}/test
|
${libgrive_SOURCE_DIR}/test
|
||||||
|
${Boost_INCLUDE_DIRS}
|
||||||
${OPT_INCS}
|
${OPT_INCS}
|
||||||
${YAJL_INCLUDE_DIRS}
|
${YAJL_INCLUDE_DIRS}
|
||||||
)
|
)
|
||||||
|
|
||||||
file(GLOB DRIVE_HEADERS
|
|
||||||
${libgrive_SOURCE_DIR}/src/drive/*.hh
|
|
||||||
)
|
|
||||||
|
|
||||||
file (GLOB PROTOCOL_HEADERS
|
file (GLOB PROTOCOL_HEADERS
|
||||||
${libgrive_SOURCE_DIR}/src/protocol/*.hh
|
${libgrive_SOURCE_DIR}/src/protocol/*.hh
|
||||||
)
|
)
|
||||||
|
@ -64,14 +58,12 @@ file (GLOB XML_HEADERS
|
||||||
|
|
||||||
file (GLOB LIBGRIVE_SRC
|
file (GLOB LIBGRIVE_SRC
|
||||||
src/base/*.cc
|
src/base/*.cc
|
||||||
src/drive/*.cc
|
|
||||||
src/drive2/*.cc
|
src/drive2/*.cc
|
||||||
src/http/*.cc
|
src/http/*.cc
|
||||||
src/protocol/*.cc
|
src/protocol/*.cc
|
||||||
src/json/*.cc
|
src/json/*.cc
|
||||||
src/util/*.cc
|
src/util/*.cc
|
||||||
src/util/log/*.cc
|
src/util/log/*.cc
|
||||||
src/xml/*.cc
|
|
||||||
)
|
)
|
||||||
|
|
||||||
add_definitions(
|
add_definitions(
|
||||||
|
@ -86,9 +78,11 @@ target_link_libraries( grive
|
||||||
${YAJL_LIBRARIES}
|
${YAJL_LIBRARIES}
|
||||||
${CURL_LIBRARIES}
|
${CURL_LIBRARIES}
|
||||||
${LIBGCRYPT_LIBRARIES}
|
${LIBGCRYPT_LIBRARIES}
|
||||||
${Boost_LIBRARIES}
|
${Boost_FILESYSTEM_LIBRARY}
|
||||||
|
${Boost_PROGRAM_OPTIONS_LIBRARY}
|
||||||
|
${Boost_REGEX_LIBRARY}
|
||||||
|
${Boost_SYSTEM_LIBRARY}
|
||||||
${IBERTY_LIBRARY}
|
${IBERTY_LIBRARY}
|
||||||
${EXPAT_LIBRARY}
|
|
||||||
${OPT_LIBS}
|
${OPT_LIBS}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -118,9 +112,7 @@ IF ( CPPUNIT_FOUND )
|
||||||
# list of test source files here
|
# list of test source files here
|
||||||
file(GLOB TEST_SRC
|
file(GLOB TEST_SRC
|
||||||
test/base/*.cc
|
test/base/*.cc
|
||||||
test/drive/*.cc
|
|
||||||
test/util/*.cc
|
test/util/*.cc
|
||||||
test/xml/*.cc
|
|
||||||
)
|
)
|
||||||
|
|
||||||
add_executable( unittest
|
add_executable( unittest
|
||||||
|
@ -131,7 +123,6 @@ IF ( CPPUNIT_FOUND )
|
||||||
target_link_libraries( unittest
|
target_link_libraries( unittest
|
||||||
grive
|
grive
|
||||||
${CPPUNIT_LIBRARY}
|
${CPPUNIT_LIBRARY}
|
||||||
${Boost_LIBRARIES}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
ENDIF ( CPPUNIT_FOUND )
|
ENDIF ( CPPUNIT_FOUND )
|
||||||
|
@ -144,9 +135,13 @@ add_executable( btest ${BTEST_SRC} )
|
||||||
|
|
||||||
target_link_libraries( btest
|
target_link_libraries( btest
|
||||||
grive
|
grive
|
||||||
${Boost_LIBRARIES}
|
${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if ( ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD" )
|
||||||
|
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-c++11-narrowing" )
|
||||||
|
endif ( ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD" )
|
||||||
|
|
||||||
if ( WIN32 )
|
if ( WIN32 )
|
||||||
else ( WIN32 )
|
else ( WIN32 )
|
||||||
set_target_properties( btest
|
set_target_properties( btest
|
||||||
|
|
|
@ -35,22 +35,16 @@
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <map>
|
#include <map>
|
||||||
#include <sstream>
|
|
||||||
|
|
||||||
// for debugging only
|
// for debugging only
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
namespace
|
|
||||||
{
|
|
||||||
const std::string state_file = ".grive_state" ;
|
|
||||||
}
|
|
||||||
|
|
||||||
Drive::Drive( Syncer *syncer, const Val& options ) :
|
Drive::Drive( Syncer *syncer, const Val& options ) :
|
||||||
m_syncer ( syncer ),
|
m_syncer ( syncer ),
|
||||||
m_root ( options["path"].Str() ),
|
m_root ( options["path"].Str() ),
|
||||||
m_state ( m_root / state_file, options ),
|
m_state ( m_root, options ),
|
||||||
m_options ( options )
|
m_options ( options )
|
||||||
{
|
{
|
||||||
assert( m_syncer ) ;
|
assert( m_syncer ) ;
|
||||||
|
@ -58,18 +52,6 @@ Drive::Drive( Syncer *syncer, const Val& options ) :
|
||||||
|
|
||||||
void Drive::FromRemote( const Entry& entry )
|
void Drive::FromRemote( const Entry& entry )
|
||||||
{
|
{
|
||||||
// entries from change feed does not have the parent HREF,
|
|
||||||
// so these checkings are done in normal entries only
|
|
||||||
Resource *parent = m_state.FindByHref( entry.ParentHref() ) ;
|
|
||||||
|
|
||||||
if ( parent != 0 && !parent->IsFolder() )
|
|
||||||
Log( "warning: entry %1% has parent %2% which is not a folder, ignored",
|
|
||||||
entry.Title(), parent->Name(), log::verbose ) ;
|
|
||||||
|
|
||||||
else if ( parent == 0 || !parent->IsInRootTree() )
|
|
||||||
Log( "file \"%1%\" parent doesn't exist, ignored", entry.Title(), log::verbose ) ;
|
|
||||||
|
|
||||||
else
|
|
||||||
m_state.FromRemote( entry ) ;
|
m_state.FromRemote( entry ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -85,35 +67,7 @@ void Drive::FromChange( const Entry& entry )
|
||||||
|
|
||||||
void Drive::SaveState()
|
void Drive::SaveState()
|
||||||
{
|
{
|
||||||
m_state.Write( m_root / state_file ) ;
|
m_state.Write() ;
|
||||||
}
|
|
||||||
|
|
||||||
void Drive::SyncFolders( )
|
|
||||||
{
|
|
||||||
Log( "Synchronizing folders", log::info ) ;
|
|
||||||
|
|
||||||
std::auto_ptr<Feed> feed = m_syncer->GetFolders() ;
|
|
||||||
while ( feed->GetNext( m_syncer->Agent() ) )
|
|
||||||
{
|
|
||||||
// first, get all collections from the query result
|
|
||||||
for ( Feed::iterator i = feed->begin() ; i != feed->end() ; ++i )
|
|
||||||
{
|
|
||||||
const Entry &e = *i ;
|
|
||||||
if ( e.IsDir() )
|
|
||||||
{
|
|
||||||
if ( e.ParentHrefs().size() != 1 )
|
|
||||||
Log( "folder \"%1%\" has multiple parents, ignored", e.Title(), log::verbose ) ;
|
|
||||||
|
|
||||||
else if ( e.Title().find('/') != std::string::npos )
|
|
||||||
Log( "folder \"%1%\" contains a slash in its name, ignored", e.Title(), log::verbose ) ;
|
|
||||||
|
|
||||||
else
|
|
||||||
m_state.FromRemote( e ) ;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
m_state.ResolveEntry() ;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Drive::DetectChanges()
|
void Drive::DetectChanges()
|
||||||
|
@ -121,13 +75,8 @@ void Drive::DetectChanges()
|
||||||
Log( "Reading local directories", log::info ) ;
|
Log( "Reading local directories", log::info ) ;
|
||||||
m_state.FromLocal( m_root ) ;
|
m_state.FromLocal( m_root ) ;
|
||||||
|
|
||||||
long prev_stamp = m_state.ChangeStamp() ;
|
|
||||||
Trace( "previous change stamp is %1%", prev_stamp ) ;
|
|
||||||
|
|
||||||
SyncFolders( ) ;
|
|
||||||
|
|
||||||
Log( "Reading remote server file list", log::info ) ;
|
Log( "Reading remote server file list", log::info ) ;
|
||||||
std::auto_ptr<Feed> feed = m_syncer->GetAll() ;
|
std::unique_ptr<Feed> feed = m_syncer->GetAll() ;
|
||||||
|
|
||||||
while ( feed->GetNext( m_syncer->Agent() ) )
|
while ( feed->GetNext( m_syncer->Agent() ) )
|
||||||
{
|
{
|
||||||
|
@ -135,12 +84,19 @@ void Drive::DetectChanges()
|
||||||
feed->begin(), feed->end(),
|
feed->begin(), feed->end(),
|
||||||
boost::bind( &Drive::FromRemote, this, _1 ) ) ;
|
boost::bind( &Drive::FromRemote, this, _1 ) ) ;
|
||||||
}
|
}
|
||||||
|
m_state.ResolveEntry() ;
|
||||||
|
}
|
||||||
|
|
||||||
// pull the changes feed
|
// pull the changes feed
|
||||||
|
// FIXME: unused until Grive will use the feed-based sync instead of reading full tree
|
||||||
|
void Drive::ReadChanges()
|
||||||
|
{
|
||||||
|
long prev_stamp = m_state.ChangeStamp() ;
|
||||||
if ( prev_stamp != -1 )
|
if ( prev_stamp != -1 )
|
||||||
{
|
{
|
||||||
|
Trace( "previous change stamp is %1%", prev_stamp ) ;
|
||||||
Log( "Detecting changes from last sync", log::info ) ;
|
Log( "Detecting changes from last sync", log::info ) ;
|
||||||
feed = m_syncer->GetChanges( prev_stamp+1 ) ;
|
std::unique_ptr<Feed> feed = m_syncer->GetChanges( prev_stamp+1 ) ;
|
||||||
while ( feed->GetNext( m_syncer->Agent() ) )
|
while ( feed->GetNext( m_syncer->Agent() ) )
|
||||||
{
|
{
|
||||||
std::for_each(
|
std::for_each(
|
||||||
|
@ -150,11 +106,6 @@ void Drive::DetectChanges()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Drive::Move( fs::path old_p, fs::path new_p )
|
|
||||||
{
|
|
||||||
return m_state.Move( m_syncer, old_p, new_p, m_options["path"].Str() );
|
|
||||||
}
|
|
||||||
|
|
||||||
void Drive::Update()
|
void Drive::Update()
|
||||||
{
|
{
|
||||||
Log( "Synchronizing files", log::info ) ;
|
Log( "Synchronizing files", log::info ) ;
|
||||||
|
|
|
@ -41,7 +41,6 @@ public :
|
||||||
Drive( Syncer *syncer, const Val& options ) ;
|
Drive( Syncer *syncer, const Val& options ) ;
|
||||||
|
|
||||||
void DetectChanges() ;
|
void DetectChanges() ;
|
||||||
bool Move( fs::path old_p, fs::path new_p );
|
|
||||||
void Update() ;
|
void Update() ;
|
||||||
void DryRun() ;
|
void DryRun() ;
|
||||||
void SaveState() ;
|
void SaveState() ;
|
||||||
|
@ -49,7 +48,7 @@ public :
|
||||||
struct Error : virtual Exception {} ;
|
struct Error : virtual Exception {} ;
|
||||||
|
|
||||||
private :
|
private :
|
||||||
void SyncFolders( ) ;
|
void ReadChanges() ;
|
||||||
void FromRemote( const Entry& entry ) ;
|
void FromRemote( const Entry& entry ) ;
|
||||||
void FromChange( const Entry& entry ) ;
|
void FromChange( const Entry& entry ) ;
|
||||||
void UpdateChangeStamp( ) ;
|
void UpdateChangeStamp( ) ;
|
||||||
|
|
|
@ -36,7 +36,8 @@ Entry::Entry( ) :
|
||||||
m_is_dir ( true ),
|
m_is_dir ( true ),
|
||||||
m_resource_id ( "folder:root" ),
|
m_resource_id ( "folder:root" ),
|
||||||
m_change_stamp ( -1 ),
|
m_change_stamp ( -1 ),
|
||||||
m_is_removed ( false )
|
m_is_removed ( false ),
|
||||||
|
m_size ( 0 )
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,6 +66,11 @@ std::string Entry::MD5() const
|
||||||
return m_md5 ;
|
return m_md5 ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u64_t Entry::Size() const
|
||||||
|
{
|
||||||
|
return m_size ;
|
||||||
|
}
|
||||||
|
|
||||||
DateTime Entry::MTime() const
|
DateTime Entry::MTime() const
|
||||||
{
|
{
|
||||||
return m_mtime ;
|
return m_mtime ;
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include "util/Types.hh"
|
||||||
#include "util/DateTime.hh"
|
#include "util/DateTime.hh"
|
||||||
#include "util/FileSystem.hh"
|
#include "util/FileSystem.hh"
|
||||||
|
|
||||||
|
@ -44,6 +45,7 @@ public :
|
||||||
bool IsDir() const ;
|
bool IsDir() const ;
|
||||||
std::string MD5() const ;
|
std::string MD5() const ;
|
||||||
DateTime MTime() const ;
|
DateTime MTime() const ;
|
||||||
|
u64_t Size() const ;
|
||||||
|
|
||||||
std::string Name() const ;
|
std::string Name() const ;
|
||||||
|
|
||||||
|
@ -80,6 +82,7 @@ protected :
|
||||||
|
|
||||||
DateTime m_mtime ;
|
DateTime m_mtime ;
|
||||||
bool m_is_removed ;
|
bool m_is_removed ;
|
||||||
|
u64_t m_size ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} // end of namespace gr
|
} // end of namespace gr
|
||||||
|
|
|
@ -30,6 +30,10 @@ Feed::Feed( const std::string &url ):
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Feed::~Feed()
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
Feed::iterator Feed::begin() const
|
Feed::iterator Feed::begin() const
|
||||||
{
|
{
|
||||||
return m_entries.begin() ;
|
return m_entries.begin() ;
|
||||||
|
|
|
@ -41,6 +41,7 @@ public :
|
||||||
public :
|
public :
|
||||||
Feed( const std::string& url );
|
Feed( const std::string& url );
|
||||||
virtual bool GetNext( http::Agent *http ) = 0 ;
|
virtual bool GetNext( http::Agent *http ) = 0 ;
|
||||||
|
virtual ~Feed() = 0 ;
|
||||||
iterator begin() const ;
|
iterator begin() const ;
|
||||||
iterator end() const ;
|
iterator end() const ;
|
||||||
|
|
||||||
|
|
|
@ -18,6 +18,7 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "Resource.hh"
|
#include "Resource.hh"
|
||||||
|
#include "ResourceTree.hh"
|
||||||
#include "Entry.hh"
|
#include "Entry.hh"
|
||||||
#include "Syncer.hh"
|
#include "Syncer.hh"
|
||||||
|
|
||||||
|
@ -27,6 +28,7 @@
|
||||||
#include "util/log/Log.hh"
|
#include "util/log/Log.hh"
|
||||||
#include "util/OS.hh"
|
#include "util/OS.hh"
|
||||||
#include "util/File.hh"
|
#include "util/File.hh"
|
||||||
|
#include "http/Error.hh"
|
||||||
|
|
||||||
#include <boost/exception/all.hpp>
|
#include <boost/exception/all.hpp>
|
||||||
#include <boost/filesystem.hpp>
|
#include <boost/filesystem.hpp>
|
||||||
|
@ -45,20 +47,26 @@ namespace gr {
|
||||||
Resource::Resource( const fs::path& root_folder ) :
|
Resource::Resource( const fs::path& root_folder ) :
|
||||||
m_name ( root_folder.string() ),
|
m_name ( root_folder.string() ),
|
||||||
m_kind ( "folder" ),
|
m_kind ( "folder" ),
|
||||||
|
m_size ( 0 ),
|
||||||
m_id ( "folder:root" ),
|
m_id ( "folder:root" ),
|
||||||
m_href ( "root" ),
|
m_href ( "root" ),
|
||||||
|
m_is_editable( true ),
|
||||||
m_parent ( 0 ),
|
m_parent ( 0 ),
|
||||||
m_state ( sync ),
|
m_state ( sync ),
|
||||||
m_is_editable( true )
|
m_json ( NULL ),
|
||||||
|
m_local_exists( true )
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
Resource::Resource( const std::string& name, const std::string& kind ) :
|
Resource::Resource( const std::string& name, const std::string& kind ) :
|
||||||
m_name ( name ),
|
m_name ( name ),
|
||||||
m_kind ( kind ),
|
m_kind ( kind ),
|
||||||
|
m_size ( 0 ),
|
||||||
|
m_is_editable( true ),
|
||||||
m_parent ( 0 ),
|
m_parent ( 0 ),
|
||||||
m_state ( unknown ),
|
m_state ( unknown ),
|
||||||
m_is_editable( true )
|
m_json ( NULL ),
|
||||||
|
m_local_exists( false )
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -75,7 +83,7 @@ void Resource::SetState( State new_state )
|
||||||
boost::bind( &Resource::SetState, _1, new_state ) ) ;
|
boost::bind( &Resource::SetState, _1, new_state ) ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Resource::FromRemoteFolder( const Entry& remote, const DateTime& last_change )
|
void Resource::FromRemoteFolder( const Entry& remote )
|
||||||
{
|
{
|
||||||
fs::path path = Path() ;
|
fs::path path = Path() ;
|
||||||
|
|
||||||
|
@ -83,65 +91,55 @@ void Resource::FromRemoteFolder( const Entry& remote, const DateTime& last_chang
|
||||||
Log( "folder %1% is read-only", path, log::verbose ) ;
|
Log( "folder %1% is read-only", path, log::verbose ) ;
|
||||||
|
|
||||||
// already sync
|
// already sync
|
||||||
if ( fs::is_directory( path ) )
|
if ( m_local_exists && m_kind == "folder" )
|
||||||
{
|
{
|
||||||
Log( "folder %1% is in sync", path, log::verbose ) ;
|
Log( "folder %1% is in sync", path, log::verbose ) ;
|
||||||
m_state = sync ;
|
m_state = sync ;
|
||||||
}
|
}
|
||||||
|
else if ( m_local_exists && m_kind == "file" )
|
||||||
// remote file created after last sync, so remote is newer
|
|
||||||
else if ( remote.MTime() > last_change )
|
|
||||||
{
|
|
||||||
if ( fs::exists( path ) )
|
|
||||||
{
|
{
|
||||||
// TODO: handle type change
|
// TODO: handle type change
|
||||||
Log( "%1% changed from folder to file", path, log::verbose ) ;
|
Log( "%1% changed from folder to file", path, log::verbose ) ;
|
||||||
m_state = sync ;
|
m_state = sync ;
|
||||||
}
|
}
|
||||||
else
|
else if ( m_local_exists && m_kind == "bad" )
|
||||||
{
|
{
|
||||||
// make all children as remote_new, if any
|
Log( "%1% inaccessible", path, log::verbose ) ;
|
||||||
|
m_state = sync ;
|
||||||
|
}
|
||||||
|
else if ( remote.MTime().Sec() > m_mtime.Sec() ) // FIXME only seconds are stored in local index
|
||||||
|
{
|
||||||
|
// remote folder created after last sync, so remote is newer
|
||||||
Log( "folder %1% is created in remote", path, log::verbose ) ;
|
Log( "folder %1% is created in remote", path, log::verbose ) ;
|
||||||
SetState( remote_new ) ;
|
SetState( remote_new ) ;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
if ( fs::exists( path ) )
|
|
||||||
{
|
|
||||||
// TODO: handle type chage
|
|
||||||
Log( "%1% changed from file to folder", path, log::verbose ) ;
|
|
||||||
m_state = sync ;
|
|
||||||
}
|
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
Log( "folder %1% is deleted in local", path, log::verbose ) ;
|
Log( "folder %1% is deleted in local", path, log::verbose ) ;
|
||||||
SetState( local_deleted ) ;
|
SetState( local_deleted ) ;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/// Update the state according to information (i.e. Entry) from remote. This function
|
/// Update the state according to information (i.e. Entry) from remote. This function
|
||||||
/// compares the modification time and checksum of both copies and determine which
|
/// compares the modification time and checksum of both copies and determine which
|
||||||
/// one is newer.
|
/// one is newer.
|
||||||
void Resource::FromRemote( const Entry& remote, const DateTime& last_change )
|
void Resource::FromRemote( const Entry& remote )
|
||||||
{
|
{
|
||||||
// sync folder
|
// sync folder
|
||||||
if ( remote.IsDir() && IsFolder() )
|
if ( remote.IsDir() && IsFolder() )
|
||||||
FromRemoteFolder( remote, last_change ) ;
|
FromRemoteFolder( remote ) ;
|
||||||
else
|
else
|
||||||
FromRemoteFile( remote, last_change ) ;
|
FromRemoteFile( remote ) ;
|
||||||
|
|
||||||
AssignIDs( remote ) ;
|
AssignIDs( remote ) ;
|
||||||
|
|
||||||
assert( m_state != unknown ) ;
|
assert( m_state != unknown ) ;
|
||||||
|
|
||||||
if ( m_state == remote_new || m_state == remote_changed )
|
if ( m_state == remote_new || m_state == remote_changed )
|
||||||
{
|
|
||||||
m_md5 = remote.MD5() ;
|
m_md5 = remote.MD5() ;
|
||||||
|
|
||||||
m_mtime = remote.MTime() ;
|
m_mtime = remote.MTime() ;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
void Resource::AssignIDs( const Entry& remote )
|
void Resource::AssignIDs( const Entry& remote )
|
||||||
{
|
{
|
||||||
|
@ -153,10 +151,11 @@ void Resource::AssignIDs( const Entry& remote )
|
||||||
m_content = remote.ContentSrc() ;
|
m_content = remote.ContentSrc() ;
|
||||||
m_is_editable = remote.IsEditable() ;
|
m_is_editable = remote.IsEditable() ;
|
||||||
m_etag = remote.ETag() ;
|
m_etag = remote.ETag() ;
|
||||||
|
m_md5 = remote.MD5() ;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void Resource::FromRemoteFile( const Entry& remote, const DateTime& last_change )
|
void Resource::FromRemoteFile( const Entry& remote )
|
||||||
{
|
{
|
||||||
assert( m_parent != 0 ) ;
|
assert( m_parent != 0 ) ;
|
||||||
|
|
||||||
|
@ -174,16 +173,21 @@ void Resource::FromRemoteFile( const Entry& remote, const DateTime& last_change
|
||||||
m_state = m_parent->m_state ;
|
m_state = m_parent->m_state ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
else if ( m_kind == "bad" )
|
||||||
|
{
|
||||||
|
m_state = sync;
|
||||||
|
}
|
||||||
|
|
||||||
// local not exists
|
// local not exists
|
||||||
else if ( !fs::exists( path ) )
|
else if ( !m_local_exists )
|
||||||
{
|
{
|
||||||
Trace( "file %1% change stamp = %2%", Path(), remote.ChangeStamp() ) ;
|
Trace( "file %1% change stamp = %2%", Path(), remote.ChangeStamp() ) ;
|
||||||
|
|
||||||
if ( remote.MTime() > last_change || remote.ChangeStamp() > 0 )
|
if ( remote.MTime().Sec() > m_mtime.Sec() || remote.MD5() != m_md5 || remote.ChangeStamp() > 0 )
|
||||||
{
|
{
|
||||||
Log( "file %1% is created in remote (change %2%)", path,
|
Log( "file %1% is created in remote (change %2%)", path,
|
||||||
remote.ChangeStamp(), log::verbose ) ;
|
remote.ChangeStamp(), log::verbose ) ;
|
||||||
|
m_size = remote.Size();
|
||||||
m_state = remote_new ;
|
m_state = remote_new ;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -196,27 +200,21 @@ void Resource::FromRemoteFile( const Entry& remote, const DateTime& last_change
|
||||||
// remote checksum unknown, assume the file is not changed in remote
|
// remote checksum unknown, assume the file is not changed in remote
|
||||||
else if ( remote.MD5().empty() )
|
else if ( remote.MD5().empty() )
|
||||||
{
|
{
|
||||||
Log( "file %1% has unknown checksum in remote. assuned in sync",
|
Log( "file %1% has unknown checksum in remote. assumed in sync",
|
||||||
Path(), log::verbose ) ;
|
Path(), log::verbose ) ;
|
||||||
m_state = sync ;
|
m_state = sync ;
|
||||||
}
|
}
|
||||||
|
|
||||||
// if checksum is equal, no need to compare the mtime
|
|
||||||
else if ( remote.MD5() == m_md5 )
|
|
||||||
{
|
|
||||||
Log( "file %1% is already in sync", Path(), log::verbose ) ;
|
|
||||||
m_state = sync ;
|
|
||||||
}
|
|
||||||
|
|
||||||
// use mtime to check which one is more recent
|
// use mtime to check which one is more recent
|
||||||
else
|
else if ( remote.Size() != m_size || remote.MD5() != GetMD5() )
|
||||||
{
|
{
|
||||||
assert( m_state != unknown ) ;
|
assert( m_state != unknown ) ;
|
||||||
|
|
||||||
// if remote is modified
|
// if remote is modified
|
||||||
if ( remote.MTime() > m_mtime )
|
if ( remote.MTime().Sec() > m_mtime.Sec() )
|
||||||
{
|
{
|
||||||
Log( "file %1% is changed in remote", path, log::verbose ) ;
|
Log( "file %1% is changed in remote", path, log::verbose ) ;
|
||||||
|
m_size = remote.Size();
|
||||||
m_state = remote_changed ;
|
m_state = remote_changed ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -229,33 +227,105 @@ void Resource::FromRemoteFile( const Entry& remote, const DateTime& last_change
|
||||||
else
|
else
|
||||||
Trace( "file %1% state is %2%", m_name, m_state ) ;
|
Trace( "file %1% state is %2%", m_name, m_state ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// if checksum is equal, no need to compare the mtime
|
||||||
|
else
|
||||||
|
{
|
||||||
|
Log( "file %1% is already in sync", Path(), log::verbose ) ;
|
||||||
|
m_state = sync ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Resource::FromDeleted( Val& state )
|
||||||
|
{
|
||||||
|
assert( !m_json );
|
||||||
|
m_json = &state;
|
||||||
|
if ( state.Has( "ctime" ) )
|
||||||
|
m_ctime.Assign( state["ctime"].U64(), 0 );
|
||||||
|
if ( state.Has( "md5" ) )
|
||||||
|
m_md5 = state["md5"];
|
||||||
|
if ( state.Has( "srv_time" ) )
|
||||||
|
m_mtime.Assign( state[ "srv_time" ].U64(), 0 ) ;
|
||||||
|
if ( state.Has( "size" ) )
|
||||||
|
m_size = state[ "size" ].U64();
|
||||||
|
m_state = both_deleted;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Update the resource with the attributes of local file or directory. This
|
/// Update the resource with the attributes of local file or directory. This
|
||||||
/// function will propulate the fields in m_entry.
|
/// function will propulate the fields in m_entry.
|
||||||
void Resource::FromLocal( const DateTime& last_sync )
|
void Resource::FromLocal( Val& state )
|
||||||
{
|
{
|
||||||
fs::path path = Path() ;
|
assert( !m_json );
|
||||||
//assert( fs::exists( path ) ) ;
|
m_json = &state;
|
||||||
|
|
||||||
// root folder is always in sync
|
// root folder is always in sync
|
||||||
if ( !IsRoot() )
|
if ( !IsRoot() )
|
||||||
{
|
{
|
||||||
m_mtime = os::FileCTime( path ) ;
|
fs::path path = Path() ;
|
||||||
|
FileType ft ;
|
||||||
// follow parent recursively
|
try
|
||||||
if ( m_parent->m_state == local_new || m_parent->m_state == local_deleted )
|
{
|
||||||
m_state = local_new ;
|
os::Stat( path, &m_ctime, (off64_t*)&m_size, &ft ) ;
|
||||||
|
}
|
||||||
// if the file is not created after last sync, assume file is
|
catch ( os::Error &e )
|
||||||
// remote_deleted first, it will be updated to sync/remote_changed
|
{
|
||||||
// in FromRemote()
|
// invalid symlink, unreadable file or something else
|
||||||
else
|
int const* eno = boost::get_error_info< boost::errinfo_errno >(e);
|
||||||
m_state = ( m_mtime > last_sync ? local_new : remote_deleted ) ;
|
Log( "Error accessing %1%: %2%; skipping file", path.string(), strerror( *eno ), log::warning );
|
||||||
|
m_state = sync;
|
||||||
|
m_kind = "bad";
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if ( ft == FT_UNKNOWN )
|
||||||
|
{
|
||||||
|
// Skip sockets/FIFOs/etc
|
||||||
|
Log( "File %1% is not a regular file or directory; skipping file", path.string(), log::warning );
|
||||||
|
m_state = sync;
|
||||||
|
m_kind = "bad";
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
m_name = path.filename().string() ;
|
m_name = path.filename().string() ;
|
||||||
m_kind = IsFolder() ? "folder" : "file" ;
|
m_kind = ft == FT_DIR ? "folder" : "file";
|
||||||
m_md5 = IsFolder() ? "" : crypt::MD5::Get( path ) ;
|
m_local_exists = true;
|
||||||
|
|
||||||
|
bool is_changed;
|
||||||
|
if ( state.Has( "ctime" ) && (u64_t) m_ctime.Sec() <= state["ctime"].U64() &&
|
||||||
|
( ft == FT_DIR || state.Has( "md5" ) ) )
|
||||||
|
{
|
||||||
|
if ( ft != FT_DIR )
|
||||||
|
m_md5 = state["md5"];
|
||||||
|
is_changed = false;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if ( ft != FT_DIR )
|
||||||
|
{
|
||||||
|
// File is changed locally. TODO: Detect conflicts
|
||||||
|
is_changed = ( state.Has( "size" ) && m_size != state["size"].U64() ) ||
|
||||||
|
!state.Has( "md5" ) || GetMD5() != state["md5"].Str();
|
||||||
|
}
|
||||||
|
else
|
||||||
|
is_changed = true;
|
||||||
|
}
|
||||||
|
if ( state.Has( "srv_time" ) )
|
||||||
|
m_mtime.Assign( state[ "srv_time" ].U64(), 0 ) ;
|
||||||
|
|
||||||
|
// Upload file if it is changed and remove if not.
|
||||||
|
// State will be updated to sync/remote_changed in FromRemote()
|
||||||
|
m_state = is_changed ? local_new : remote_deleted;
|
||||||
|
if ( m_state == local_new )
|
||||||
|
{
|
||||||
|
// local_new means this file is changed in local.
|
||||||
|
// this means we can't delete any of its parents.
|
||||||
|
// make sure their state is also set to local_new.
|
||||||
|
Resource *p = m_parent;
|
||||||
|
while ( p && p->m_state == remote_deleted )
|
||||||
|
{
|
||||||
|
p->m_state = local_new;
|
||||||
|
p = p->m_parent;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
assert( m_state != unknown ) ;
|
assert( m_state != unknown ) ;
|
||||||
|
@ -286,7 +356,7 @@ std::string Resource::Kind() const
|
||||||
return m_kind ;
|
return m_kind ;
|
||||||
}
|
}
|
||||||
|
|
||||||
DateTime Resource::MTime() const
|
DateTime Resource::ServerTime() const
|
||||||
{
|
{
|
||||||
return m_mtime ;
|
return m_mtime ;
|
||||||
}
|
}
|
||||||
|
@ -368,14 +438,14 @@ Resource* Resource::FindChild( const std::string& name )
|
||||||
}
|
}
|
||||||
|
|
||||||
// try to change the state to "sync"
|
// try to change the state to "sync"
|
||||||
void Resource::Sync( Syncer *syncer, DateTime& sync_time, const Val& options )
|
void Resource::Sync( Syncer *syncer, ResourceTree *res_tree, const Val& options )
|
||||||
{
|
{
|
||||||
assert( m_state != unknown ) ;
|
assert( m_state != unknown ) ;
|
||||||
assert( !IsRoot() || m_state == sync ) ; // root folder is already synced
|
assert( !IsRoot() || m_state == sync ) ; // root folder is already synced
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
SyncSelf( syncer, options ) ;
|
SyncSelf( syncer, res_tree, options ) ;
|
||||||
}
|
}
|
||||||
catch ( File::Error &e )
|
catch ( File::Error &e )
|
||||||
{
|
{
|
||||||
|
@ -388,18 +458,103 @@ void Resource::Sync( Syncer *syncer, DateTime& sync_time, const Val& options )
|
||||||
Log( "Error syncing %1%: %2%", Path(), e.what(), log::error );
|
Log( "Error syncing %1%: %2%", Path(), e.what(), log::error );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
catch ( http::Error &e )
|
||||||
// we want the server sync time, so we will take the server time of the last file uploaded to store as the sync time
|
{
|
||||||
// m_mtime is updated to server modified time when the file is uploaded
|
int *curlcode = boost::get_error_info< http::CurlCode > ( e ) ;
|
||||||
sync_time = std::max(sync_time, m_mtime);
|
int *httpcode = boost::get_error_info< http::HttpResponseCode > ( e ) ;
|
||||||
|
std::string msg;
|
||||||
|
if ( curlcode )
|
||||||
|
msg = *( boost::get_error_info< http::CurlErrMsg > ( e ) );
|
||||||
|
else if ( httpcode )
|
||||||
|
msg = "HTTP " + boost::to_string( *httpcode );
|
||||||
|
else
|
||||||
|
msg = e.what();
|
||||||
|
Log( "Error syncing %1%: %2%", Path(), msg, log::error );
|
||||||
|
std::string *url = boost::get_error_info< http::Url > ( e );
|
||||||
|
std::string *resp_hdr = boost::get_error_info< http::HttpResponseHeaders > ( e );
|
||||||
|
std::string *resp_txt = boost::get_error_info< http::HttpResponseText > ( e );
|
||||||
|
http::Header *req_hdr = boost::get_error_info< http::HttpRequestHeaders > ( e );
|
||||||
|
if ( url )
|
||||||
|
Log( "Request URL: %1%", *url, log::verbose );
|
||||||
|
if ( req_hdr )
|
||||||
|
Log( "Request headers: %1%", req_hdr->Str(), log::verbose );
|
||||||
|
if ( resp_hdr )
|
||||||
|
Log( "Response headers: %1%", *resp_hdr, log::verbose );
|
||||||
|
if ( resp_txt )
|
||||||
|
Log( "Response text: %1%", *resp_txt, log::verbose );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
// if myself is deleted, no need to do the childrens
|
// if myself is deleted, no need to do the childrens
|
||||||
if ( m_state != local_deleted && m_state != remote_deleted )
|
if ( m_state != local_deleted && m_state != remote_deleted )
|
||||||
|
{
|
||||||
std::for_each( m_child.begin(), m_child.end(),
|
std::for_each( m_child.begin(), m_child.end(),
|
||||||
boost::bind( &Resource::Sync, _1, syncer, boost::ref(sync_time), options ) ) ;
|
boost::bind( &Resource::Sync, _1, syncer, res_tree, options ) ) ;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void Resource::SyncSelf( Syncer* syncer, const Val& options )
|
bool Resource::CheckRename( Syncer* syncer, ResourceTree *res_tree )
|
||||||
|
{
|
||||||
|
if ( !IsFolder() && ( m_state == local_new || m_state == remote_new ) )
|
||||||
|
{
|
||||||
|
bool is_local = m_state == local_new;
|
||||||
|
State other = is_local ? local_deleted : remote_deleted;
|
||||||
|
if ( is_local )
|
||||||
|
{
|
||||||
|
// First check size index for locally added files
|
||||||
|
details::SizeRange moved = res_tree->FindBySize( m_size );
|
||||||
|
bool found = false;
|
||||||
|
for ( details::SizeMap::iterator i = moved.first ; i != moved.second; i++ )
|
||||||
|
{
|
||||||
|
Resource *m = *i;
|
||||||
|
if ( m->m_state == other )
|
||||||
|
{
|
||||||
|
found = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ( !found )
|
||||||
|
{
|
||||||
|
// Don't check md5 sums if there are no deleted files with same size
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
details::MD5Range moved = res_tree->FindByMD5( GetMD5() );
|
||||||
|
for ( details::MD5Map::iterator i = moved.first ; i != moved.second; i++ )
|
||||||
|
{
|
||||||
|
Resource *m = *i;
|
||||||
|
if ( m->m_state == other )
|
||||||
|
{
|
||||||
|
Resource* from = m_state == local_new || m_state == remote_new ? m : this;
|
||||||
|
Resource* to = m_state == local_new || m_state == remote_new ? this : m;
|
||||||
|
Log( "sync %1% moved to %2%. moving %3%", from->Path(), to->Path(),
|
||||||
|
is_local ? "remote" : "local", log::info );
|
||||||
|
if ( syncer )
|
||||||
|
{
|
||||||
|
if ( is_local )
|
||||||
|
{
|
||||||
|
syncer->Move( from, to->Parent(), to->Name() );
|
||||||
|
to->SetIndex( false );
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
fs::rename( from->Path(), to->Path() );
|
||||||
|
to->SetIndex( true );
|
||||||
|
}
|
||||||
|
to->m_mtime = from->m_mtime;
|
||||||
|
to->m_json->Set( "srv_time", Val( from->m_mtime.Sec() ) );
|
||||||
|
from->DeleteIndex();
|
||||||
|
}
|
||||||
|
from->m_state = both_deleted;
|
||||||
|
to->m_state = sync;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Resource::SyncSelf( Syncer* syncer, ResourceTree *res_tree, const Val& options )
|
||||||
{
|
{
|
||||||
assert( !IsRoot() || m_state == sync ) ; // root is always sync
|
assert( !IsRoot() || m_state == sync ) ; // root is always sync
|
||||||
assert( IsRoot() || !syncer || m_parent->IsFolder() ) ;
|
assert( IsRoot() || !syncer || m_parent->IsFolder() ) ;
|
||||||
|
@ -408,29 +563,45 @@ void Resource::SyncSelf( Syncer* syncer, const Val& options )
|
||||||
|
|
||||||
const fs::path path = Path() ;
|
const fs::path path = Path() ;
|
||||||
|
|
||||||
|
// Detect renames
|
||||||
|
if ( CheckRename( syncer, res_tree ) )
|
||||||
|
return;
|
||||||
|
|
||||||
switch ( m_state )
|
switch ( m_state )
|
||||||
{
|
{
|
||||||
case local_new :
|
case local_new :
|
||||||
Log( "sync %1% doesn't exist in server, uploading", path, log::info ) ;
|
Log( "sync %1% doesn't exist in server, uploading", path, log::info ) ;
|
||||||
|
|
||||||
// FIXME: (?) do not write new timestamp on failed upload
|
|
||||||
if ( syncer && syncer->Create( this ) )
|
if ( syncer && syncer->Create( this ) )
|
||||||
|
{
|
||||||
m_state = sync ;
|
m_state = sync ;
|
||||||
|
SetIndex( false );
|
||||||
|
}
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
case local_deleted :
|
case local_deleted :
|
||||||
Log( "sync %1% deleted in local. deleting remote", path, log::info ) ;
|
Log( "sync %1% deleted in local. deleting remote", path, log::info ) ;
|
||||||
if ( syncer )
|
if ( syncer && !options["no-delete-remote"].Bool() )
|
||||||
|
{
|
||||||
syncer->DeleteRemote( this ) ;
|
syncer->DeleteRemote( this ) ;
|
||||||
|
DeleteIndex() ;
|
||||||
|
}
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
case local_changed :
|
case local_changed :
|
||||||
Log( "sync %1% changed in local. uploading", path, log::info ) ;
|
Log( "sync %1% changed in local. uploading", path, log::info ) ;
|
||||||
if ( syncer && syncer->EditContent( this, options["new-rev"].Bool() ) )
|
if ( syncer && syncer->EditContent( this, options["new-rev"].Bool() ) )
|
||||||
|
{
|
||||||
m_state = sync ;
|
m_state = sync ;
|
||||||
|
SetIndex( false );
|
||||||
|
}
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
case remote_new :
|
case remote_new :
|
||||||
|
if ( options["no-remote-new"].Bool() )
|
||||||
|
Log( "sync %1% created in remote. skipping", path, log::info ) ;
|
||||||
|
else
|
||||||
|
{
|
||||||
Log( "sync %1% created in remote. creating local", path, log::info ) ;
|
Log( "sync %1% created in remote. creating local", path, log::info ) ;
|
||||||
if ( syncer )
|
if ( syncer )
|
||||||
{
|
{
|
||||||
|
@ -438,39 +609,65 @@ void Resource::SyncSelf( Syncer* syncer, const Val& options )
|
||||||
fs::create_directories( path ) ;
|
fs::create_directories( path ) ;
|
||||||
else
|
else
|
||||||
syncer->Download( this, path ) ;
|
syncer->Download( this, path ) ;
|
||||||
|
SetIndex( true ) ;
|
||||||
m_state = sync ;
|
m_state = sync ;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
case remote_changed :
|
case remote_changed :
|
||||||
assert( !IsFolder() ) ;
|
assert( !IsFolder() ) ;
|
||||||
|
if ( options["upload-only"].Bool() )
|
||||||
|
Log( "sync %1% changed in remote. skipping", path, log::info ) ;
|
||||||
|
else
|
||||||
|
{
|
||||||
Log( "sync %1% changed in remote. downloading", path, log::info ) ;
|
Log( "sync %1% changed in remote. downloading", path, log::info ) ;
|
||||||
if ( syncer )
|
if ( syncer )
|
||||||
{
|
{
|
||||||
syncer->Download( this, path ) ;
|
syncer->Download( this, path ) ;
|
||||||
|
SetIndex( true ) ;
|
||||||
m_state = sync ;
|
m_state = sync ;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
case remote_deleted :
|
case remote_deleted :
|
||||||
Log( "sync %1% deleted in remote. deleting local", path, log::info ) ;
|
Log( "sync %1% deleted in remote. deleting local", path, log::info ) ;
|
||||||
if ( syncer )
|
if ( syncer )
|
||||||
|
{
|
||||||
DeleteLocal() ;
|
DeleteLocal() ;
|
||||||
|
DeleteIndex() ;
|
||||||
|
}
|
||||||
|
break ;
|
||||||
|
|
||||||
|
case both_deleted :
|
||||||
|
if ( syncer )
|
||||||
|
DeleteIndex() ;
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
case sync :
|
case sync :
|
||||||
Log( "sync %1% already in sync", path, log::verbose ) ;
|
Log( "sync %1% already in sync", path, log::verbose ) ;
|
||||||
|
if ( !IsRoot() )
|
||||||
|
SetIndex( false ) ;
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
// shouldn't go here
|
// shouldn't go here
|
||||||
case unknown :
|
case unknown :
|
||||||
|
default :
|
||||||
assert( false ) ;
|
assert( false ) ;
|
||||||
break ;
|
break ;
|
||||||
|
|
||||||
default :
|
|
||||||
break ;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ( syncer && m_json )
|
||||||
|
{
|
||||||
|
// Update server time of this file
|
||||||
|
m_json->Set( "srv_time", Val( m_mtime.Sec() ) );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Resource::SetServerTime( const DateTime& time )
|
||||||
|
{
|
||||||
|
m_mtime = time ;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// this function doesn't really remove the local file. it renames it.
|
/// this function doesn't really remove the local file. it renames it.
|
||||||
|
@ -478,7 +675,7 @@ void Resource::DeleteLocal()
|
||||||
{
|
{
|
||||||
static const boost::format trash_file( "%1%-%2%" ) ;
|
static const boost::format trash_file( "%1%-%2%" ) ;
|
||||||
|
|
||||||
assert( m_parent != 0 ) ;
|
assert( m_parent != NULL ) ;
|
||||||
Resource* p = m_parent;
|
Resource* p = m_parent;
|
||||||
fs::path destdir;
|
fs::path destdir;
|
||||||
while ( !p->IsRoot() )
|
while ( !p->IsRoot() )
|
||||||
|
@ -503,6 +700,38 @@ void Resource::DeleteLocal()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Resource::DeleteIndex()
|
||||||
|
{
|
||||||
|
(*m_parent->m_json)["tree"].Del( Name() );
|
||||||
|
m_json = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Resource::SetIndex( bool re_stat )
|
||||||
|
{
|
||||||
|
assert( m_parent && m_parent->m_json != NULL );
|
||||||
|
if ( !m_json )
|
||||||
|
m_json = &((*m_parent->m_json)["tree"]).Item( Name() );
|
||||||
|
FileType ft;
|
||||||
|
if ( re_stat )
|
||||||
|
os::Stat( Path(), &m_ctime, NULL, &ft );
|
||||||
|
else
|
||||||
|
ft = IsFolder() ? FT_DIR : FT_FILE;
|
||||||
|
m_json->Set( "ctime", Val( m_ctime.Sec() ) );
|
||||||
|
if ( ft != FT_DIR )
|
||||||
|
{
|
||||||
|
m_json->Set( "md5", Val( m_md5 ) );
|
||||||
|
m_json->Set( "size", Val( m_size ) );
|
||||||
|
m_json->Del( "tree" );
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// add tree item if it does not exist
|
||||||
|
m_json->Item( "tree" );
|
||||||
|
m_json->Del( "md5" );
|
||||||
|
m_json->Del( "size" );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Resource::iterator Resource::begin() const
|
Resource::iterator Resource::begin() const
|
||||||
{
|
{
|
||||||
return m_child.begin() ;
|
return m_child.begin() ;
|
||||||
|
@ -523,7 +752,7 @@ std::ostream& operator<<( std::ostream& os, Resource::State s )
|
||||||
static const char *state[] =
|
static const char *state[] =
|
||||||
{
|
{
|
||||||
"sync", "local_new", "local_changed", "local_deleted", "remote_new",
|
"sync", "local_new", "local_changed", "local_deleted", "remote_new",
|
||||||
"remote_changed", "remote_deleted"
|
"remote_changed", "remote_deleted", "both_deleted"
|
||||||
} ;
|
} ;
|
||||||
assert( s >= 0 && s < Count(state) ) ;
|
assert( s >= 0 && s < Count(state) ) ;
|
||||||
return os << state[s] ;
|
return os << state[s] ;
|
||||||
|
@ -536,15 +765,32 @@ std::string Resource::StateStr() const
|
||||||
return ss.str() ;
|
return ss.str() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u64_t Resource::Size() const
|
||||||
|
{
|
||||||
|
return m_size ;
|
||||||
|
}
|
||||||
|
|
||||||
std::string Resource::MD5() const
|
std::string Resource::MD5() const
|
||||||
{
|
{
|
||||||
return m_md5 ;
|
return m_md5 ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::string Resource::GetMD5()
|
||||||
|
{
|
||||||
|
if ( m_md5.empty() && !IsFolder() && m_local_exists )
|
||||||
|
{
|
||||||
|
// MD5 checksum is calculated lazily and only when really needed:
|
||||||
|
// 1) when a local rename is supposed (when there are a new file and a deleted file of the same size)
|
||||||
|
// 2) when local ctime is changed, but file size isn't
|
||||||
|
m_md5 = crypt::MD5::Get( Path() );
|
||||||
|
}
|
||||||
|
return m_md5 ;
|
||||||
|
}
|
||||||
|
|
||||||
bool Resource::IsRoot() const
|
bool Resource::IsRoot() const
|
||||||
{
|
{
|
||||||
// Root entry does not show up in file feeds, so we check for empty parent (and self-href)
|
// Root entry does not show up in file feeds, so we check for empty parent (and self-href)
|
||||||
return m_parent == 0 ;
|
return !m_parent ;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Resource::HasID() const
|
bool Resource::HasID() const
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include "util/Types.hh"
|
||||||
#include "util/DateTime.hh"
|
#include "util/DateTime.hh"
|
||||||
#include "util/Exception.hh"
|
#include "util/Exception.hh"
|
||||||
#include "util/FileSystem.hh"
|
#include "util/FileSystem.hh"
|
||||||
|
@ -29,6 +30,8 @@
|
||||||
|
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
|
class ResourceTree ;
|
||||||
|
|
||||||
class Syncer ;
|
class Syncer ;
|
||||||
|
|
||||||
class Val ;
|
class Val ;
|
||||||
|
@ -73,6 +76,8 @@ public :
|
||||||
/// Resource delete in remote, need to delete in local
|
/// Resource delete in remote, need to delete in local
|
||||||
remote_deleted,
|
remote_deleted,
|
||||||
|
|
||||||
|
/// Both deleted. State is used to remove leftover files from the index after sync.
|
||||||
|
both_deleted,
|
||||||
|
|
||||||
/// invalid value
|
/// invalid value
|
||||||
unknown
|
unknown
|
||||||
|
@ -87,7 +92,7 @@ public :
|
||||||
|
|
||||||
std::string Name() const ;
|
std::string Name() const ;
|
||||||
std::string Kind() const ;
|
std::string Kind() const ;
|
||||||
DateTime MTime() const ;
|
DateTime ServerTime() const ;
|
||||||
std::string SelfHref() const ;
|
std::string SelfHref() const ;
|
||||||
std::string ContentSrc() const ;
|
std::string ContentSrc() const ;
|
||||||
std::string ETag() const ;
|
std::string ETag() const ;
|
||||||
|
@ -104,12 +109,16 @@ public :
|
||||||
bool IsInRootTree() const ;
|
bool IsInRootTree() const ;
|
||||||
bool IsRoot() const ;
|
bool IsRoot() const ;
|
||||||
bool HasID() const ;
|
bool HasID() const ;
|
||||||
|
u64_t Size() const;
|
||||||
std::string MD5() const ;
|
std::string MD5() const ;
|
||||||
|
std::string GetMD5() ;
|
||||||
|
|
||||||
void FromRemote( const Entry& remote, const DateTime& last_change ) ;
|
void FromRemote( const Entry& remote ) ;
|
||||||
void FromLocal( const DateTime& last_sync ) ;
|
void FromDeleted( Val& state ) ;
|
||||||
|
void FromLocal( Val& state ) ;
|
||||||
|
|
||||||
void Sync( Syncer* syncer, DateTime& sync_time, const Val& options ) ;
|
void Sync( Syncer* syncer, ResourceTree *res_tree, const Val& options ) ;
|
||||||
|
void SetServerTime( const DateTime& time ) ;
|
||||||
|
|
||||||
// children access
|
// children access
|
||||||
iterator begin() const ;
|
iterator begin() const ;
|
||||||
|
@ -128,18 +137,23 @@ private :
|
||||||
private :
|
private :
|
||||||
void SetState( State new_state ) ;
|
void SetState( State new_state ) ;
|
||||||
|
|
||||||
void FromRemoteFolder( const Entry& remote, const DateTime& last_change ) ;
|
void FromRemoteFolder( const Entry& remote ) ;
|
||||||
void FromRemoteFile( const Entry& remote, const DateTime& last_change ) ;
|
void FromRemoteFile( const Entry& remote ) ;
|
||||||
|
|
||||||
void DeleteLocal() ;
|
void DeleteLocal() ;
|
||||||
|
void DeleteIndex() ;
|
||||||
|
void SetIndex( bool ) ;
|
||||||
|
|
||||||
void SyncSelf( Syncer* syncer, const Val& options ) ;
|
bool CheckRename( Syncer* syncer, ResourceTree *res_tree ) ;
|
||||||
|
void SyncSelf( Syncer* syncer, ResourceTree *res_tree, const Val& options ) ;
|
||||||
|
|
||||||
private :
|
private :
|
||||||
std::string m_name ;
|
std::string m_name ;
|
||||||
std::string m_kind ;
|
std::string m_kind ;
|
||||||
std::string m_md5 ;
|
std::string m_md5 ;
|
||||||
DateTime m_mtime ;
|
DateTime m_mtime ;
|
||||||
|
DateTime m_ctime ;
|
||||||
|
u64_t m_size ;
|
||||||
|
|
||||||
std::string m_id ;
|
std::string m_id ;
|
||||||
std::string m_href ;
|
std::string m_href ;
|
||||||
|
@ -152,6 +166,8 @@ private :
|
||||||
std::vector<Resource*> m_child ;
|
std::vector<Resource*> m_child ;
|
||||||
|
|
||||||
State m_state ;
|
State m_state ;
|
||||||
|
Val* m_json ;
|
||||||
|
bool m_local_exists ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} // end of namespace gr::v1
|
} // end of namespace gr::v1
|
||||||
|
|
|
@ -97,7 +97,21 @@ const Resource* ResourceTree::FindByHref( const std::string& href ) const
|
||||||
return i != map.end() ? *i : 0 ;
|
return i != map.end() ? *i : 0 ;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Reinsert should be called when the ID/HREF were updated
|
MD5Range ResourceTree::FindByMD5( const std::string& md5 )
|
||||||
|
{
|
||||||
|
MD5Map& map = m_set.get<ByMD5>() ;
|
||||||
|
if ( !md5.empty() )
|
||||||
|
return map.equal_range( md5 );
|
||||||
|
return MD5Range( map.end(), map.end() ) ;
|
||||||
|
}
|
||||||
|
|
||||||
|
SizeRange ResourceTree::FindBySize( u64_t size )
|
||||||
|
{
|
||||||
|
SizeMap& map = m_set.get<BySize>() ;
|
||||||
|
return map.equal_range( size );
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reinsert should be called when the ID/HREF/MD5 were updated
|
||||||
bool ResourceTree::ReInsert( Resource *coll )
|
bool ResourceTree::ReInsert( Resource *coll )
|
||||||
{
|
{
|
||||||
Set& s = m_set.get<ByIdentity>() ;
|
Set& s = m_set.get<ByIdentity>() ;
|
||||||
|
@ -123,11 +137,11 @@ void ResourceTree::Erase( Resource *coll )
|
||||||
s.erase( s.find( coll ) ) ;
|
s.erase( s.find( coll ) ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ResourceTree::Update( Resource *coll, const Entry& e, const DateTime& last_change )
|
void ResourceTree::Update( Resource *coll, const Entry& e )
|
||||||
{
|
{
|
||||||
assert( coll != 0 ) ;
|
assert( coll != 0 ) ;
|
||||||
|
|
||||||
coll->FromRemote( e, last_change ) ;
|
coll->FromRemote( e ) ;
|
||||||
ReInsert( coll ) ;
|
ReInsert( coll ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -33,22 +33,27 @@ namespace gr {
|
||||||
namespace details
|
namespace details
|
||||||
{
|
{
|
||||||
using namespace boost::multi_index ;
|
using namespace boost::multi_index ;
|
||||||
struct ByID {} ;
|
struct ByMD5 {} ;
|
||||||
struct ByHref {} ;
|
struct ByHref {} ;
|
||||||
struct ByIdentity {} ;
|
struct ByIdentity {} ;
|
||||||
|
struct BySize {} ;
|
||||||
|
|
||||||
typedef multi_index_container<
|
typedef multi_index_container<
|
||||||
Resource*,
|
Resource*,
|
||||||
indexed_by<
|
indexed_by<
|
||||||
hashed_non_unique<tag<ByHref>, const_mem_fun<Resource, std::string, &Resource::SelfHref> >,
|
hashed_non_unique<tag<ByHref>, const_mem_fun<Resource, std::string, &Resource::SelfHref> >,
|
||||||
hashed_non_unique<tag<ByID>, const_mem_fun<Resource, std::string, &Resource::ResourceID> >,
|
hashed_non_unique<tag<ByMD5>, const_mem_fun<Resource, std::string, &Resource::MD5> >,
|
||||||
|
hashed_non_unique<tag<BySize>, const_mem_fun<Resource, u64_t, &Resource::Size> >,
|
||||||
hashed_unique<tag<ByIdentity>, identity<Resource*> >
|
hashed_unique<tag<ByIdentity>, identity<Resource*> >
|
||||||
>
|
>
|
||||||
> Folders ;
|
> Folders ;
|
||||||
|
|
||||||
typedef Folders::index<ByID>::type IDMap ;
|
typedef Folders::index<ByMD5>::type MD5Map ;
|
||||||
typedef Folders::index<ByHref>::type HrefMap ;
|
typedef Folders::index<ByHref>::type HrefMap ;
|
||||||
|
typedef Folders::index<BySize>::type SizeMap ;
|
||||||
typedef Folders::index<ByIdentity>::type Set ;
|
typedef Folders::index<ByIdentity>::type Set ;
|
||||||
|
typedef std::pair<SizeMap::iterator, SizeMap::iterator> SizeRange ;
|
||||||
|
typedef std::pair<MD5Map::iterator, MD5Map::iterator> MD5Range ;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*! \brief A simple container for storing folders
|
/*! \brief A simple container for storing folders
|
||||||
|
@ -68,14 +73,14 @@ public :
|
||||||
|
|
||||||
Resource* FindByHref( const std::string& href ) ;
|
Resource* FindByHref( const std::string& href ) ;
|
||||||
const Resource* FindByHref( const std::string& href ) const ;
|
const Resource* FindByHref( const std::string& href ) const ;
|
||||||
|
details::MD5Range FindByMD5( const std::string& md5 ) ;
|
||||||
Resource* FindByID( const std::string& id ) ;
|
details::SizeRange FindBySize( u64_t size ) ;
|
||||||
|
|
||||||
bool ReInsert( Resource *coll ) ;
|
bool ReInsert( Resource *coll ) ;
|
||||||
|
|
||||||
void Insert( Resource *coll ) ;
|
void Insert( Resource *coll ) ;
|
||||||
void Erase( Resource *coll ) ;
|
void Erase( Resource *coll ) ;
|
||||||
void Update( Resource *coll, const Entry& e, const DateTime& last_change ) ;
|
void Update( Resource *coll, const Entry& e ) ;
|
||||||
|
|
||||||
Resource* Root() ;
|
Resource* Root() ;
|
||||||
const Resource* Root() const ;
|
const Resource* Root() const ;
|
||||||
|
|
|
@ -26,29 +26,38 @@
|
||||||
#include "util/Crypt.hh"
|
#include "util/Crypt.hh"
|
||||||
#include "util/File.hh"
|
#include "util/File.hh"
|
||||||
#include "util/log/Log.hh"
|
#include "util/log/Log.hh"
|
||||||
#include "json/Val.hh"
|
|
||||||
#include "json/JsonParser.hh"
|
#include "json/JsonParser.hh"
|
||||||
|
|
||||||
|
#include <boost/algorithm/string.hpp>
|
||||||
|
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
|
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
State::State( const fs::path& filename, const Val& options ) :
|
const std::string state_file = ".grive_state" ;
|
||||||
|
const std::string ignore_file = ".griveignore" ;
|
||||||
|
const int MAX_IGN = 65536 ;
|
||||||
|
const char* regex_escape_chars = ".^$|()[]{}*+?\\";
|
||||||
|
const boost::regex regex_escape_re( "[.^$|()\\[\\]{}*+?\\\\]" );
|
||||||
|
|
||||||
|
inline std::string regex_escape( std::string s )
|
||||||
|
{
|
||||||
|
return regex_replace( s, regex_escape_re, "\\\\&", boost::format_sed );
|
||||||
|
}
|
||||||
|
|
||||||
|
State::State( const fs::path& root, const Val& options ) :
|
||||||
|
m_root ( root ),
|
||||||
m_res ( options["path"].Str() ),
|
m_res ( options["path"].Str() ),
|
||||||
m_cstamp ( -1 )
|
m_cstamp ( -1 )
|
||||||
{
|
{
|
||||||
Read( filename ) ;
|
Read() ;
|
||||||
|
|
||||||
bool force = options.Has( "force" ) ? options["force"].Bool() : false ;
|
// the "-f" option will make grive always think remote is newer
|
||||||
|
m_force = options.Has( "force" ) ? options["force"].Bool() : false ;
|
||||||
|
|
||||||
|
std::string m_orig_ign = m_ign;
|
||||||
if ( options.Has( "ignore" ) && options["ignore"].Str() != m_ign )
|
if ( options.Has( "ignore" ) && options["ignore"].Str() != m_ign )
|
||||||
{
|
|
||||||
// also "-f" is implicitly turned on when ignore regexp is changed
|
|
||||||
// because without it grive would think that previously ignored files are deleted locally
|
|
||||||
if ( !m_ign.empty() )
|
|
||||||
force = true;
|
|
||||||
m_ign = options["ignore"].Str();
|
m_ign = options["ignore"].Str();
|
||||||
}
|
|
||||||
else if ( options.Has( "dir" ) )
|
else if ( options.Has( "dir" ) )
|
||||||
{
|
{
|
||||||
const boost::regex trim_path( "^/+|/+$" );
|
const boost::regex trim_path( "^/+|/+$" );
|
||||||
|
@ -56,24 +65,20 @@ State::State( const fs::path& filename, const Val& options ) :
|
||||||
if ( !m_dir.empty() )
|
if ( !m_dir.empty() )
|
||||||
{
|
{
|
||||||
// "-s" is internally converted to an ignore regexp
|
// "-s" is internally converted to an ignore regexp
|
||||||
const boost::regex esc( "[.^$|()\\[\\]{}*+?\\\\]" );
|
m_dir = regex_escape( m_dir );
|
||||||
std::string ign = "^(?!"+regex_replace( m_dir, esc, "\\\\&", boost::format_sed )+"(/|$))";
|
size_t pos = 0;
|
||||||
if ( !m_ign.empty() && ign != m_ign )
|
while ( ( pos = m_dir.find( '/', pos ) ) != std::string::npos )
|
||||||
force = true;
|
{
|
||||||
|
m_dir = m_dir.substr( 0, pos ) + "$|" + m_dir;
|
||||||
|
pos = pos*2 + 3;
|
||||||
|
}
|
||||||
|
std::string ign = "^(?!"+m_dir+"(/|$))";
|
||||||
m_ign = ign;
|
m_ign = ign;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// the "-f" option will make grive always think remote is newer
|
m_ign_changed = m_orig_ign != "" && m_orig_ign != m_ign;
|
||||||
if ( force )
|
m_ign_re = boost::regex( m_ign.empty() ? "^\\.(grive$|grive_state$|trash)" : ( m_ign+"|^\\.(grive$|grive_state$|trash)" ) );
|
||||||
{
|
|
||||||
m_last_change = DateTime() ;
|
|
||||||
m_last_sync = DateTime::Now() ;
|
|
||||||
}
|
|
||||||
|
|
||||||
m_ign_re = boost::regex( m_ign.empty() ? "^\\.(grive|grive_state|trash)" : ( m_ign+"|^\\.(grive|grive_state|trash)" ) );
|
|
||||||
|
|
||||||
Log( "last server change time: %1%; last sync time: %2%", m_last_change, m_last_sync, log::verbose ) ;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
State::~State()
|
State::~State()
|
||||||
|
@ -84,52 +89,71 @@ State::~State()
|
||||||
/// of local directory.
|
/// of local directory.
|
||||||
void State::FromLocal( const fs::path& p )
|
void State::FromLocal( const fs::path& p )
|
||||||
{
|
{
|
||||||
FromLocal( p, m_res.Root() ) ;
|
m_res.Root()->FromLocal( m_st ) ;
|
||||||
|
FromLocal( p, m_res.Root(), m_st.Item( "tree" ) ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool State::IsIgnore( const std::string& filename )
|
bool State::IsIgnore( const std::string& filename )
|
||||||
{
|
{
|
||||||
return regex_search( filename.c_str(), m_ign_re );
|
return regex_search( filename.c_str(), m_ign_re, boost::format_perl );
|
||||||
}
|
}
|
||||||
|
|
||||||
void State::FromLocal( const fs::path& p, Resource* folder )
|
void State::FromLocal( const fs::path& p, Resource* folder, Val& tree )
|
||||||
{
|
{
|
||||||
assert( folder != 0 ) ;
|
assert( folder != 0 ) ;
|
||||||
assert( folder->IsFolder() ) ;
|
assert( folder->IsFolder() ) ;
|
||||||
|
|
||||||
// sync the folder itself
|
Val::Object leftover = tree.AsObject();
|
||||||
folder->FromLocal( m_last_sync ) ;
|
|
||||||
|
|
||||||
for ( fs::directory_iterator i( p ) ; i != fs::directory_iterator() ; ++i )
|
for ( fs::directory_iterator i( p ) ; i != fs::directory_iterator() ; ++i )
|
||||||
{
|
{
|
||||||
std::string fname = i->path().filename().string() ;
|
std::string fname = i->path().filename().string() ;
|
||||||
fs::file_status st = fs::status(i->path());
|
std::string path = ( folder->IsRoot() ? fname : ( folder->RelPath() / fname ).string() );
|
||||||
|
|
||||||
std::string path = folder->IsRoot() ? fname : ( folder->RelPath() / fname ).string();
|
|
||||||
if ( IsIgnore( path ) )
|
if ( IsIgnore( path ) )
|
||||||
Log( "file %1% is ignored by grive", path, log::verbose ) ;
|
Log( "file %1% is ignored by grive", path, log::verbose ) ;
|
||||||
|
|
||||||
// check for broken symblic links
|
|
||||||
else if ( st.type() == fs::file_not_found )
|
|
||||||
Log( "file %1% doesn't exist (broken link?), ignored", i->path(), log::verbose ) ;
|
|
||||||
|
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
bool is_dir = st.type() == fs::directory_file;
|
|
||||||
// if the Resource object of the child already exists, it should
|
// if the Resource object of the child already exists, it should
|
||||||
// have been so no need to do anything here
|
// have been so no need to do anything here
|
||||||
Resource *c = folder->FindChild( fname ) ;
|
Resource *c = folder->FindChild( fname ), *c2 = c ;
|
||||||
if ( c == 0 )
|
if ( !c )
|
||||||
{
|
{
|
||||||
c = new Resource( fname, is_dir ? "folder" : "file" ) ;
|
c2 = new Resource( fname, "" ) ;
|
||||||
folder->AddChild( c ) ;
|
folder->AddChild( c2 ) ;
|
||||||
m_res.Insert( c ) ;
|
}
|
||||||
|
leftover.erase( fname );
|
||||||
|
Val& rec = tree.Item( fname );
|
||||||
|
if ( m_force )
|
||||||
|
rec.Del( "srv_time" );
|
||||||
|
c2->FromLocal( rec ) ;
|
||||||
|
if ( !c )
|
||||||
|
m_res.Insert( c2 ) ;
|
||||||
|
if ( c2->IsFolder() )
|
||||||
|
FromLocal( *i, c2, rec.Item( "tree" ) ) ;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c->FromLocal( m_last_sync ) ;
|
for( Val::Object::iterator i = leftover.begin(); i != leftover.end(); i++ )
|
||||||
|
{
|
||||||
if ( is_dir )
|
std::string path = folder->IsRoot() ? i->first : ( folder->RelPath() / i->first ).string();
|
||||||
FromLocal( *i, c ) ;
|
if ( IsIgnore( path ) )
|
||||||
|
Log( "file %1% is ignored by grive", path, log::verbose ) ;
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// Restore state of locally deleted files
|
||||||
|
Resource *c = folder->FindChild( i->first ), *c2 = c ;
|
||||||
|
if ( !c )
|
||||||
|
{
|
||||||
|
c2 = new Resource( i->first, i->second.Has( "tree" ) ? "folder" : "file" ) ;
|
||||||
|
folder->AddChild( c2 ) ;
|
||||||
|
}
|
||||||
|
Val& rec = tree.Item( i->first );
|
||||||
|
if ( m_force || m_ign_changed )
|
||||||
|
rec.Del( "srv_time" );
|
||||||
|
c2->FromDeleted( rec );
|
||||||
|
if ( !c )
|
||||||
|
m_res.Insert( c2 ) ;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -170,9 +194,9 @@ std::size_t State::TryResolveEntry()
|
||||||
assert( !m_unresolved.empty() ) ;
|
assert( !m_unresolved.empty() ) ;
|
||||||
|
|
||||||
std::size_t count = 0 ;
|
std::size_t count = 0 ;
|
||||||
std::vector<Entry>& en = m_unresolved ;
|
std::list<Entry>& en = m_unresolved ;
|
||||||
|
|
||||||
for ( std::vector<Entry>::iterator i = en.begin() ; i != en.end() ; )
|
for ( std::list<Entry>::iterator i = en.begin() ; i != en.end() ; )
|
||||||
{
|
{
|
||||||
if ( Update( *i ) )
|
if ( Update( *i ) )
|
||||||
{
|
{
|
||||||
|
@ -192,7 +216,7 @@ void State::FromChange( const Entry& e )
|
||||||
// entries in the change feed is always treated as newer in remote,
|
// entries in the change feed is always treated as newer in remote,
|
||||||
// so we override the last sync time to 0
|
// so we override the last sync time to 0
|
||||||
if ( Resource *res = m_res.FindByHref( e.SelfHref() ) )
|
if ( Resource *res = m_res.FindByHref( e.SelfHref() ) )
|
||||||
m_res.Update( res, e, DateTime() ) ;
|
m_res.Update( res, e ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool State::Update( const Entry& e )
|
bool State::Update( const Entry& e )
|
||||||
|
@ -208,11 +232,17 @@ bool State::Update( const Entry& e )
|
||||||
Log( "%1% is ignored by grive", path, log::verbose ) ;
|
Log( "%1% is ignored by grive", path, log::verbose ) ;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
m_res.Update( res, e, m_last_change ) ;
|
m_res.Update( res, e ) ;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
else if ( Resource *parent = m_res.FindByHref( e.ParentHref() ) )
|
else if ( Resource *parent = m_res.FindByHref( e.ParentHref() ) )
|
||||||
{
|
{
|
||||||
|
if ( !parent->IsFolder() )
|
||||||
|
{
|
||||||
|
// https://github.com/vitalif/grive2/issues/148
|
||||||
|
Log( "%1% is owned by something that's not a directory: href=%2% name=%3%", e.Name(), e.ParentHref(), parent->RelPath(), log::error );
|
||||||
|
return true;
|
||||||
|
}
|
||||||
assert( parent->IsFolder() ) ;
|
assert( parent->IsFolder() ) ;
|
||||||
|
|
||||||
std::string path = parent->IsRoot() ? e.Name() : ( parent->RelPath() / e.Name() ).string();
|
std::string path = parent->IsRoot() ? e.Name() : ( parent->RelPath() / e.Name() ).string();
|
||||||
|
@ -225,10 +255,10 @@ bool State::Update( const Entry& e )
|
||||||
// see if the entry already exist in local
|
// see if the entry already exist in local
|
||||||
std::string name = e.Name() ;
|
std::string name = e.Name() ;
|
||||||
Resource *child = parent->FindChild( name ) ;
|
Resource *child = parent->FindChild( name ) ;
|
||||||
if ( child != 0 )
|
if ( child )
|
||||||
{
|
{
|
||||||
// since we are updating the ID and Href, we need to remove it and re-add it.
|
// since we are updating the ID and Href, we need to remove it and re-add it.
|
||||||
m_res.Update( child, e, m_last_change ) ;
|
m_res.Update( child, e ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
// folder entry exist in google drive, but not local. we should create
|
// folder entry exist in google drive, but not local. we should create
|
||||||
|
@ -241,7 +271,7 @@ bool State::Update( const Entry& e )
|
||||||
m_res.Insert( child ) ;
|
m_res.Insert( child ) ;
|
||||||
|
|
||||||
// update the state of the resource
|
// update the state of the resource
|
||||||
m_res.Update( child, e, m_last_change ) ;
|
m_res.Update( child, e ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
return true ;
|
return true ;
|
||||||
|
@ -265,68 +295,126 @@ State::iterator State::end()
|
||||||
return m_res.end() ;
|
return m_res.end() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
void State::Read( const fs::path& filename )
|
void State::Read()
|
||||||
{
|
{
|
||||||
m_last_sync.Assign( 0 ) ;
|
|
||||||
m_last_change.Assign( 0 ) ;
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
File file( filename ) ;
|
File st_file( m_root / state_file ) ;
|
||||||
|
m_st = ParseJson( st_file );
|
||||||
Val json = ParseJson( file );
|
m_cstamp = m_st["change_stamp"].Int() ;
|
||||||
Val last_sync = json["last_sync"] ;
|
|
||||||
Val last_change = json.Has( "last_change" ) ? json["last_change"] : json["last_sync"] ;
|
|
||||||
m_last_sync.Assign( last_sync["sec"].Int(), last_sync["nsec"].Int() ) ;
|
|
||||||
m_last_change.Assign( last_change["sec"].Int(), last_change["nsec"].Int() ) ;
|
|
||||||
m_ign = json.Has( "ignore_regexp" ) ? json["ignore_regexp"].Str() : std::string();
|
|
||||||
|
|
||||||
m_cstamp = json["change_stamp"].Int() ;
|
|
||||||
}
|
}
|
||||||
catch ( Exception& )
|
catch ( Exception& )
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
File ign_file( m_root / ignore_file ) ;
|
||||||
|
char ign[MAX_IGN] = { 0 };
|
||||||
|
int s = ign_file.Read( ign, MAX_IGN-1 ) ;
|
||||||
|
ParseIgnoreFile( ign, s );
|
||||||
|
}
|
||||||
|
catch ( Exception& e )
|
||||||
|
{
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void State::Write( const fs::path& filename ) const
|
std::vector<std::string> split( const boost::regex& re, const char* str, int len )
|
||||||
{
|
{
|
||||||
Val last_sync ;
|
std::vector<std::string> vec;
|
||||||
last_sync.Add( "sec", Val( (int)m_last_sync.Sec() ) );
|
boost::cregex_token_iterator i( str, str+len, re, -1, boost::format_perl );
|
||||||
last_sync.Add( "nsec", Val( (unsigned)m_last_sync.NanoSec() ) );
|
boost::cregex_token_iterator j;
|
||||||
|
while ( i != j )
|
||||||
|
{
|
||||||
|
vec.push_back( *i++ );
|
||||||
|
}
|
||||||
|
return vec;
|
||||||
|
}
|
||||||
|
|
||||||
Val last_change ;
|
bool State::ParseIgnoreFile( const char* buffer, int size )
|
||||||
last_change.Add( "sec", Val( (int)m_last_change.Sec() ) );
|
{
|
||||||
last_change.Add( "nsec", Val( (unsigned)m_last_change.NanoSec() ) );
|
const boost::regex re1( "([^\\\\]|^)[\\t\\r ]+$" );
|
||||||
|
const boost::regex re2( "^[\\t\\r ]+" );
|
||||||
|
const boost::regex re4( "([^\\\\](\\\\\\\\)*|^)\\\\\\*" );
|
||||||
|
const boost::regex re5( "([^\\\\](\\\\\\\\)*|^)\\\\\\?" );
|
||||||
|
std::string exclude_re, include_re;
|
||||||
|
std::vector<std::string> lines = split( boost::regex( "[\\n\\r]+" ), buffer, size );
|
||||||
|
for ( int i = 0; i < (int)lines.size(); i++ )
|
||||||
|
{
|
||||||
|
std::string str = regex_replace( regex_replace( lines[i], re1, "$1" ), re2, "" );
|
||||||
|
if ( str[0] == '#' || !str.size() )
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
bool inc = str[0] == '!';
|
||||||
|
if ( inc )
|
||||||
|
{
|
||||||
|
str = str.substr( 1 );
|
||||||
|
}
|
||||||
|
std::vector<std::string> parts = split( boost::regex( "/+" ), str.c_str(), str.size() );
|
||||||
|
for ( int j = 0; j < (int)parts.size(); j++ )
|
||||||
|
{
|
||||||
|
if ( parts[j] == "**" )
|
||||||
|
{
|
||||||
|
parts[j] = ".*";
|
||||||
|
}
|
||||||
|
else if ( parts[j] == "*" )
|
||||||
|
{
|
||||||
|
parts[j] = "[^/]*";
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
parts[j] = regex_escape( parts[j] );
|
||||||
|
std::string str1;
|
||||||
|
while (1)
|
||||||
|
{
|
||||||
|
str1 = regex_replace( parts[j], re5, "$1[^/]", boost::format_perl );
|
||||||
|
str1 = regex_replace( str1, re4, "$1[^/]*", boost::format_perl );
|
||||||
|
if ( str1.size() == parts[j].size() )
|
||||||
|
break;
|
||||||
|
parts[j] = str1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ( !inc )
|
||||||
|
{
|
||||||
|
str = boost::algorithm::join( parts, "/" ) + "(/|$)";
|
||||||
|
exclude_re = exclude_re + ( exclude_re.size() > 0 ? "|" : "" ) + str;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
str = "";
|
||||||
|
std::string cur;
|
||||||
|
for ( int j = 0; j < (int)parts.size(); j++ )
|
||||||
|
{
|
||||||
|
cur = cur.size() > 0 ? cur + "/" + parts[j] : "^" + parts[j];
|
||||||
|
str = ( str.size() > 0 ? str + "|" + cur : cur ) + ( j < (int)parts.size()-1 ? "$" : "(/|$)" );
|
||||||
|
}
|
||||||
|
include_re = include_re + ( include_re.size() > 0 ? "|" : "" ) + str;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ( exclude_re.size() > 0 )
|
||||||
|
{
|
||||||
|
m_ign = "^" + ( include_re.size() > 0 ? "(?!" + include_re + ")" : std::string() ) + "(" + exclude_re + ")$";
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
Val result ;
|
void State::Write()
|
||||||
result.Add( "last_sync", last_sync ) ;
|
{
|
||||||
result.Add( "last_change", last_change ) ;
|
m_st.Set( "change_stamp", Val( m_cstamp ) ) ;
|
||||||
result.Add( "change_stamp", Val(m_cstamp) ) ;
|
m_st.Set( "ignore_regexp", Val( m_ign ) ) ;
|
||||||
result.Add( "ignore_regexp", Val(m_ign) ) ;
|
|
||||||
|
|
||||||
|
fs::path filename = m_root / state_file ;
|
||||||
std::ofstream fs( filename.string().c_str() ) ;
|
std::ofstream fs( filename.string().c_str() ) ;
|
||||||
fs << result ;
|
fs << m_st ;
|
||||||
}
|
}
|
||||||
|
|
||||||
void State::Sync( Syncer *syncer, const Val& options )
|
void State::Sync( Syncer *syncer, const Val& options )
|
||||||
{
|
{
|
||||||
// set the last sync time from the time returned by the server for the last file synced
|
|
||||||
// if the sync time hasn't changed (i.e. now files have been uploaded)
|
|
||||||
// set the last sync time to the time on the client
|
// set the last sync time to the time on the client
|
||||||
// ideally because we compare server file times to the last sync time
|
m_res.Root()->Sync( syncer, &m_res, options ) ;
|
||||||
// the last sync time would always be a server time rather than a client time
|
|
||||||
// TODO - WARNING - do we use the last sync time to compare to client file times
|
|
||||||
// need to check if this introduces a new problem
|
|
||||||
DateTime last_change_time = m_last_change;
|
|
||||||
m_res.Root()->Sync( syncer, last_change_time, options ) ;
|
|
||||||
|
|
||||||
if ( last_change_time == m_last_change )
|
|
||||||
Trace( "nothing changed at the server side since %1%", m_last_change ) ;
|
|
||||||
else
|
|
||||||
{
|
|
||||||
Trace( "updating last server-side change time to %1%", last_change_time ) ;
|
|
||||||
m_last_change = last_change_time;
|
|
||||||
}
|
|
||||||
m_last_sync = DateTime::Now();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
long State::ChangeStamp() const
|
long State::ChangeStamp() const
|
||||||
|
@ -340,67 +428,4 @@ void State::ChangeStamp( long cstamp )
|
||||||
m_cstamp = cstamp ;
|
m_cstamp = cstamp ;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool State::Move( Syncer* syncer, fs::path old_p, fs::path new_p, fs::path grive_root )
|
|
||||||
{
|
|
||||||
//Convert paths to canonical representations
|
|
||||||
//Also seems to remove trailing / at the end of directory paths
|
|
||||||
old_p = fs::canonical( old_p );
|
|
||||||
grive_root = fs::canonical( grive_root );
|
|
||||||
|
|
||||||
//new_p is a little special because fs::canonical() requires that the path exists
|
|
||||||
if ( new_p.string()[ new_p.string().size() - 1 ] == '/') //If new_p ends with a /, remove it
|
|
||||||
new_p = new_p.parent_path();
|
|
||||||
new_p = fs::canonical( new_p.parent_path() ) / new_p.filename();
|
|
||||||
|
|
||||||
//Fails if source file doesn't exist, or if destination file already
|
|
||||||
//exists and is not a directory, or if the source and destination are exactly the same
|
|
||||||
if ( (fs::exists(new_p) && !fs::is_directory(new_p) ) || !fs::exists(old_p) || fs::equivalent( old_p, new_p ) )
|
|
||||||
return false;
|
|
||||||
|
|
||||||
//If new path is an existing directory, move the file into the directory
|
|
||||||
//instead of trying to rename it
|
|
||||||
if ( fs::is_directory(new_p) ){
|
|
||||||
new_p = new_p / old_p.filename();
|
|
||||||
}
|
|
||||||
|
|
||||||
//Get the paths relative to grive root.
|
|
||||||
//Just finds the substring from the end of the grive_root to the end of the path
|
|
||||||
//+1s are to exclude slash at beginning of relative path
|
|
||||||
int start = grive_root.string().size() + 1;
|
|
||||||
int nLen = new_p.string().size() - (grive_root.string().size() + 1);
|
|
||||||
int oLen = old_p.string().size() - (grive_root.string().size() + 1);
|
|
||||||
if ( start + nLen != new_p.string().size() || start + oLen != old_p.string().size() )
|
|
||||||
return false;
|
|
||||||
fs::path new_p_rootrel( new_p.string().substr( start, nLen ) );
|
|
||||||
fs::path old_p_rootrel( old_p.string().substr( start, oLen ) );
|
|
||||||
|
|
||||||
//Get resources
|
|
||||||
Resource* res = m_res.Root();
|
|
||||||
Resource* newParentRes = m_res.Root();
|
|
||||||
for ( fs::path::iterator it = old_p_rootrel.begin(); it != old_p_rootrel.end(); ++it )
|
|
||||||
{
|
|
||||||
if ( *it != "." && *it != ".." && res != 0 )
|
|
||||||
res = res->FindChild(it->string());
|
|
||||||
if ( *it == ".." )
|
|
||||||
res = res->Parent();
|
|
||||||
}
|
|
||||||
for ( fs::path::iterator it = new_p_rootrel.begin(); it != new_p_rootrel.end(); ++it )
|
|
||||||
{
|
|
||||||
if ( *it != "." && *it != ".." && *it != new_p.filename() && newParentRes != 0 )
|
|
||||||
newParentRes = newParentRes->FindChild(it->string());
|
|
||||||
if ( *it == "..")
|
|
||||||
res = res->Parent();
|
|
||||||
}
|
|
||||||
|
|
||||||
//These conditions should only occur if everything is not up-to-date
|
|
||||||
if ( res == 0 || newParentRes == 0 || res->GetState() != Resource::sync ||
|
|
||||||
newParentRes->GetState() != Resource::sync ||
|
|
||||||
newParentRes->FindChild( new_p.filename().string() ) != 0 )
|
|
||||||
return false;
|
|
||||||
|
|
||||||
fs::rename(old_p, new_p); //Moves local file
|
|
||||||
syncer->Move(res, newParentRes, new_p.filename().string()); //Moves server file
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // end of namespace gr
|
} // end of namespace gr
|
||||||
|
|
|
@ -23,14 +23,13 @@
|
||||||
|
|
||||||
#include "util/DateTime.hh"
|
#include "util/DateTime.hh"
|
||||||
#include "util/FileSystem.hh"
|
#include "util/FileSystem.hh"
|
||||||
|
#include "json/Val.hh"
|
||||||
|
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <boost/regex.hpp>
|
#include <boost/regex.hpp>
|
||||||
|
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
class Val ;
|
|
||||||
|
|
||||||
class Entry ;
|
class Entry ;
|
||||||
|
|
||||||
class Syncer ;
|
class Syncer ;
|
||||||
|
@ -43,15 +42,15 @@ public :
|
||||||
typedef ResourceTree::iterator iterator ;
|
typedef ResourceTree::iterator iterator ;
|
||||||
|
|
||||||
public :
|
public :
|
||||||
explicit State( const fs::path& filename, const Val& options ) ;
|
explicit State( const fs::path& root, const Val& options ) ;
|
||||||
~State() ;
|
~State() ;
|
||||||
|
|
||||||
void FromLocal( const fs::path& p ) ;
|
void FromLocal( const fs::path& p ) ;
|
||||||
void FromRemote( const Entry& e ) ;
|
void FromRemote( const Entry& e ) ;
|
||||||
void ResolveEntry() ;
|
void ResolveEntry() ;
|
||||||
|
|
||||||
void Read( const fs::path& filename ) ;
|
void Read() ;
|
||||||
void Write( const fs::path& filename ) const ;
|
void Write() ;
|
||||||
|
|
||||||
Resource* FindByHref( const std::string& href ) ;
|
Resource* FindByHref( const std::string& href ) ;
|
||||||
Resource* FindByID( const std::string& id ) ;
|
Resource* FindByID( const std::string& id ) ;
|
||||||
|
@ -63,10 +62,10 @@ public :
|
||||||
|
|
||||||
long ChangeStamp() const ;
|
long ChangeStamp() const ;
|
||||||
void ChangeStamp( long cstamp ) ;
|
void ChangeStamp( long cstamp ) ;
|
||||||
bool Move( Syncer* syncer, fs::path old_p, fs::path new_p, fs::path grive_root );
|
|
||||||
|
|
||||||
private :
|
private :
|
||||||
void FromLocal( const fs::path& p, Resource *folder ) ;
|
bool ParseIgnoreFile( const char* buffer, int size ) ;
|
||||||
|
void FromLocal( const fs::path& p, Resource *folder, Val& tree ) ;
|
||||||
void FromChange( const Entry& e ) ;
|
void FromChange( const Entry& e ) ;
|
||||||
bool Update( const Entry& e ) ;
|
bool Update( const Entry& e ) ;
|
||||||
std::size_t TryResolveEntry() ;
|
std::size_t TryResolveEntry() ;
|
||||||
|
@ -74,14 +73,16 @@ private :
|
||||||
bool IsIgnore( const std::string& filename ) ;
|
bool IsIgnore( const std::string& filename ) ;
|
||||||
|
|
||||||
private :
|
private :
|
||||||
|
fs::path m_root ;
|
||||||
ResourceTree m_res ;
|
ResourceTree m_res ;
|
||||||
DateTime m_last_sync ;
|
|
||||||
DateTime m_last_change ;
|
|
||||||
int m_cstamp ;
|
int m_cstamp ;
|
||||||
std::string m_ign ;
|
std::string m_ign ;
|
||||||
boost::regex m_ign_re ;
|
boost::regex m_ign_re ;
|
||||||
|
Val m_st ;
|
||||||
|
bool m_force ;
|
||||||
|
bool m_ign_changed ;
|
||||||
|
|
||||||
std::vector<Entry> m_unresolved ;
|
std::list<Entry> m_unresolved ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} // end of namespace gr
|
} // end of namespace gr
|
||||||
|
|
|
@ -41,11 +41,11 @@ http::Agent* Syncer::Agent() const
|
||||||
void Syncer::Download( Resource *res, const fs::path& file )
|
void Syncer::Download( Resource *res, const fs::path& file )
|
||||||
{
|
{
|
||||||
http::Download dl( file.string(), http::Download::NoChecksum() ) ;
|
http::Download dl( file.string(), http::Download::NoChecksum() ) ;
|
||||||
long r = m_http->Get( res->ContentSrc(), &dl, http::Header() ) ;
|
long r = m_http->Get( res->ContentSrc(), &dl, http::Header(), res->Size() ) ;
|
||||||
if ( r <= 400 )
|
if ( r <= 400 )
|
||||||
{
|
{
|
||||||
if ( res->MTime() != DateTime() )
|
if ( res->ServerTime() != DateTime() )
|
||||||
os::SetFileTime( file, res->MTime() ) ;
|
os::SetFileTime( file, res->ServerTime() ) ;
|
||||||
else
|
else
|
||||||
Log( "encountered zero date time after downloading %1%", file, log::warning ) ;
|
Log( "encountered zero date time after downloading %1%", file, log::warning ) ;
|
||||||
}
|
}
|
||||||
|
@ -56,9 +56,4 @@ void Syncer::AssignIDs( Resource *res, const Entry& remote )
|
||||||
res->AssignIDs( remote );
|
res->AssignIDs( remote );
|
||||||
}
|
}
|
||||||
|
|
||||||
void Syncer::AssignMTime( Resource *res, const DateTime& mtime )
|
|
||||||
{
|
|
||||||
res->m_mtime = mtime;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // end of namespace gr
|
} // end of namespace gr
|
||||||
|
|
|
@ -21,6 +21,7 @@
|
||||||
|
|
||||||
#include "util/FileSystem.hh"
|
#include "util/FileSystem.hh"
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <iosfwd>
|
#include <iosfwd>
|
||||||
|
@ -55,9 +56,9 @@ public :
|
||||||
virtual bool Create( Resource *res ) = 0;
|
virtual bool Create( Resource *res ) = 0;
|
||||||
virtual bool Move( Resource* res, Resource* newParent, std::string newFilename ) = 0;
|
virtual bool Move( Resource* res, Resource* newParent, std::string newFilename ) = 0;
|
||||||
|
|
||||||
virtual std::auto_ptr<Feed> GetFolders() = 0;
|
virtual std::unique_ptr<Feed> GetFolders() = 0;
|
||||||
virtual std::auto_ptr<Feed> GetAll() = 0;
|
virtual std::unique_ptr<Feed> GetAll() = 0;
|
||||||
virtual std::auto_ptr<Feed> GetChanges( long min_cstamp ) = 0;
|
virtual std::unique_ptr<Feed> GetChanges( long min_cstamp ) = 0;
|
||||||
virtual long GetChangeStamp( long min_cstamp ) = 0;
|
virtual long GetChangeStamp( long min_cstamp ) = 0;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
@ -65,7 +66,6 @@ protected:
|
||||||
http::Agent *m_http;
|
http::Agent *m_http;
|
||||||
|
|
||||||
void AssignIDs( Resource *res, const Entry& remote );
|
void AssignIDs( Resource *res, const Entry& remote );
|
||||||
void AssignMTime( Resource *res, const DateTime& mtime );
|
|
||||||
|
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
|
|
|
@ -118,13 +118,20 @@ void SymbolInfo::BacktraceInfo::Callback( bfd *abfd, asection *section,
|
||||||
if ((section->flags & SEC_ALLOC) == 0)
|
if ((section->flags & SEC_ALLOC) == 0)
|
||||||
return ;
|
return ;
|
||||||
|
|
||||||
bfd_vma vma = bfd_get_section_vma(abfd, section);
|
// bfd_get_section_vma works up to 7b1cfbcf1a27951fb1b3a212995075dd6fdf985b,
|
||||||
|
// removed in 7c13bc8c91abf291f0206b6608b31955c5ea70d8 (binutils 2.33.1 or so)
|
||||||
|
// so it's substituted by its implementation to avoid checking for binutils
|
||||||
|
// version (which at least on Debian SID it's not that easy because the
|
||||||
|
// version.h is not included with the official package)
|
||||||
|
bfd_vma vma = section->vma;
|
||||||
|
|
||||||
unsigned long address = (unsigned long)(info->m_addr);
|
unsigned long address = (unsigned long)(info->m_addr);
|
||||||
if ( address < vma )
|
if ( address < vma )
|
||||||
return;
|
return;
|
||||||
|
|
||||||
bfd_size_type size = bfd_section_size(abfd, section);
|
// bfd_section_size changed between the two objects described above,
|
||||||
|
// same rationale applies
|
||||||
|
bfd_size_type size = section->size;
|
||||||
if ( address > (vma + size))
|
if ( address > (vma + size))
|
||||||
return ;
|
return ;
|
||||||
|
|
||||||
|
|
|
@ -54,7 +54,7 @@ public :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
struct Impl ;
|
struct Impl ;
|
||||||
const std::auto_ptr<Impl> m_impl ;
|
const std::unique_ptr<Impl> m_impl ;
|
||||||
|
|
||||||
struct BacktraceInfo ;
|
struct BacktraceInfo ;
|
||||||
friend struct BacktraceInfo ;
|
friend struct BacktraceInfo ;
|
||||||
|
|
|
@ -1,34 +0,0 @@
|
||||||
/*
|
|
||||||
Common URIs for the old "Document List" Google Docs API
|
|
||||||
Copyright (C) 2012 Wan Wai Ho
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <string>
|
|
||||||
|
|
||||||
namespace gr { namespace v1
|
|
||||||
{
|
|
||||||
const std::string feed_base = "https://docs.google.com/feeds/default/private/full" ;
|
|
||||||
const std::string feed_changes = "https://docs.google.com/feeds/default/private/changes" ;
|
|
||||||
const std::string feed_metadata = "https://docs.google.com/feeds/metadata/default" ;
|
|
||||||
|
|
||||||
const std::string root_href =
|
|
||||||
"https://docs.google.com/feeds/default/private/full/folder%3Aroot" ;
|
|
||||||
const std::string root_create =
|
|
||||||
"https://docs.google.com/feeds/upload/create-session/default/private/full" ;
|
|
||||||
} }
|
|
|
@ -1,86 +0,0 @@
|
||||||
/*
|
|
||||||
Item class implementation for the old "Document List" Google Docs API
|
|
||||||
Copyright (C) 2012 Wan Wai Ho, (C) 2015 Vitaliy Filippov
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "Entry1.hh"
|
|
||||||
#include "CommonUri.hh"
|
|
||||||
|
|
||||||
#include "util/Crypt.hh"
|
|
||||||
#include "util/log/Log.hh"
|
|
||||||
#include "util/OS.hh"
|
|
||||||
#include "xml/Node.hh"
|
|
||||||
#include "xml/NodeSet.hh"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <iterator>
|
|
||||||
|
|
||||||
namespace gr { namespace v1 {
|
|
||||||
|
|
||||||
Entry1::Entry1():
|
|
||||||
Entry ()
|
|
||||||
{
|
|
||||||
m_self_href = root_href;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// construct an entry for remote - Doclist API v3
|
|
||||||
Entry1::Entry1( const xml::Node& n )
|
|
||||||
{
|
|
||||||
Update( n ) ;
|
|
||||||
}
|
|
||||||
|
|
||||||
void Entry1::Update( const xml::Node& n )
|
|
||||||
{
|
|
||||||
m_title = n["title"] ;
|
|
||||||
m_etag = n["@gd:etag"] ;
|
|
||||||
m_filename = n["docs:suggestedFilename"] ;
|
|
||||||
m_content_src = n["content"]["@src"] ;
|
|
||||||
m_self_href = n["link"].Find( "@rel", "self" )["@href"] ;
|
|
||||||
m_mtime = DateTime( n["updated"] ) ;
|
|
||||||
|
|
||||||
m_resource_id = n["gd:resourceId"] ;
|
|
||||||
m_md5 = n["docs:md5Checksum"] ;
|
|
||||||
m_is_dir = n["category"].Find( "@scheme", "http://schemas.google.com/g/2005#kind" )["@label"] == "folder" ;
|
|
||||||
m_is_editable = !n["link"].Find( "@rel", m_is_dir
|
|
||||||
? "http://schemas.google.com/g/2005#resumable-create-media" : "http://schemas.google.com/g/2005#resumable-edit-media" )
|
|
||||||
["@href"].empty() ;
|
|
||||||
|
|
||||||
// changestamp only appear in change feed entries
|
|
||||||
xml::NodeSet cs = n["docs:changestamp"]["@value"] ;
|
|
||||||
m_change_stamp = cs.empty() ? -1 : std::atoi( cs.front().Value().c_str() ) ;
|
|
||||||
if ( m_change_stamp != -1 )
|
|
||||||
{
|
|
||||||
m_self_href = n["link"].Find( "@rel", "http://schemas.google.com/docs/2007#alt-self" )["@href"] ;
|
|
||||||
}
|
|
||||||
|
|
||||||
m_parent_hrefs.clear( ) ;
|
|
||||||
xml::NodeSet parents = n["link"].Find( "@rel", "http://schemas.google.com/docs/2007#parent" ) ;
|
|
||||||
for ( xml::NodeSet::iterator i = parents.begin() ; i != parents.end() ; ++i )
|
|
||||||
{
|
|
||||||
std::string href = (*i)["@href"];
|
|
||||||
if ( href == root_href )
|
|
||||||
href = "root"; // API-independent root href
|
|
||||||
m_parent_hrefs.push_back( href ) ;
|
|
||||||
}
|
|
||||||
|
|
||||||
// convert to lower case for easy comparison
|
|
||||||
std::transform( m_md5.begin(), m_md5.end(), m_md5.begin(), tolower ) ;
|
|
||||||
|
|
||||||
m_is_removed = !n["gd:deleted"].empty() || !n["docs:removed"].empty() ;
|
|
||||||
}
|
|
||||||
|
|
||||||
} } // end of namespace gr::v1
|
|
|
@ -1,42 +0,0 @@
|
||||||
/*
|
|
||||||
Item class implementation for the old "Document List" Google Docs API
|
|
||||||
Copyright (C) 2012 Wan Wai Ho, (C) 2015 Vitaliy Filippov
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "base/Entry.hh"
|
|
||||||
|
|
||||||
namespace gr {
|
|
||||||
|
|
||||||
namespace xml
|
|
||||||
{
|
|
||||||
class Node ;
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace v1 {
|
|
||||||
|
|
||||||
class Entry1: public Entry
|
|
||||||
{
|
|
||||||
public :
|
|
||||||
Entry1( ) ;
|
|
||||||
explicit Entry1( const xml::Node& n ) ;
|
|
||||||
private :
|
|
||||||
void Update( const xml::Node& entry ) ;
|
|
||||||
} ;
|
|
||||||
|
|
||||||
} } // end of namespace gr::v1
|
|
|
@ -1,64 +0,0 @@
|
||||||
/*
|
|
||||||
Item list ("Feed") implementation for the old "Document List" Google Docs API
|
|
||||||
Copyright (C) 2012 Wan Wai Ho, (C) 2015 Vitaliy Filippov
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "CommonUri.hh"
|
|
||||||
#include "Feed1.hh"
|
|
||||||
|
|
||||||
#include "Entry1.hh"
|
|
||||||
|
|
||||||
#include "http/Agent.hh"
|
|
||||||
#include "http/Header.hh"
|
|
||||||
#include "http/XmlResponse.hh"
|
|
||||||
#include "xml/NodeSet.hh"
|
|
||||||
|
|
||||||
#include <boost/format.hpp>
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
namespace gr { namespace v1 {
|
|
||||||
|
|
||||||
Feed1::Feed1( const std::string &url ):
|
|
||||||
Feed( url )
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
bool Feed1::GetNext( http::Agent *http )
|
|
||||||
{
|
|
||||||
http::XmlResponse xrsp ;
|
|
||||||
|
|
||||||
if ( m_next.empty() )
|
|
||||||
return false;
|
|
||||||
|
|
||||||
http->Get( m_next, &xrsp, http::Header() ) ;
|
|
||||||
|
|
||||||
xml::Node m_root = xrsp.Response() ;
|
|
||||||
xml::NodeSet xe = m_root["entry"] ;
|
|
||||||
m_entries.clear() ;
|
|
||||||
for ( xml::NodeSet::iterator i = xe.begin() ; i != xe.end() ; ++i )
|
|
||||||
{
|
|
||||||
m_entries.push_back( Entry1( *i ) );
|
|
||||||
}
|
|
||||||
|
|
||||||
xml::NodeSet nss = m_root["link"].Find( "@rel", "next" ) ;
|
|
||||||
m_next = nss.empty() ? std::string( "" ) : nss["@href"];
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
} } // end of namespace gr::v1
|
|
|
@ -1,40 +0,0 @@
|
||||||
/*
|
|
||||||
Item list ("Feed") implementation for the old "Document List" Google Docs API
|
|
||||||
Copyright (C) 2012 Wan Wai Ho, (C) 2015 Vitaliy Filippov
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "base/Feed.hh"
|
|
||||||
|
|
||||||
#include "xml/Node.hh"
|
|
||||||
#include "xml/NodeSet.hh"
|
|
||||||
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include <string>
|
|
||||||
|
|
||||||
namespace gr { namespace v1 {
|
|
||||||
|
|
||||||
class Feed1: public Feed
|
|
||||||
{
|
|
||||||
public :
|
|
||||||
Feed1( const std::string& url ) ;
|
|
||||||
bool GetNext( http::Agent *http ) ;
|
|
||||||
} ;
|
|
||||||
|
|
||||||
} } // end of namespace gr::v1
|
|
|
@ -1,271 +0,0 @@
|
||||||
/*
|
|
||||||
Syncer implementation for the old "Document List" Google Docs API
|
|
||||||
Copyright (C) 2012 Wan Wai Ho, (C) 2015 Vitaliy Filippov
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "base/Resource.hh"
|
|
||||||
#include "CommonUri.hh"
|
|
||||||
#include "Entry1.hh"
|
|
||||||
#include "Feed1.hh"
|
|
||||||
#include "Syncer1.hh"
|
|
||||||
|
|
||||||
#include "http/Agent.hh"
|
|
||||||
#include "http/Header.hh"
|
|
||||||
#include "http/StringResponse.hh"
|
|
||||||
#include "http/XmlResponse.hh"
|
|
||||||
|
|
||||||
#include "xml/Node.hh"
|
|
||||||
#include "xml/NodeSet.hh"
|
|
||||||
#include "xml/String.hh"
|
|
||||||
#include "xml/TreeBuilder.hh"
|
|
||||||
|
|
||||||
#include "util/File.hh"
|
|
||||||
#include "util/OS.hh"
|
|
||||||
#include "util/log/Log.hh"
|
|
||||||
|
|
||||||
#include <boost/exception/all.hpp>
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
// for debugging
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
namespace gr { namespace v1 {
|
|
||||||
|
|
||||||
// hard coded XML file
|
|
||||||
const std::string xml_meta =
|
|
||||||
"<?xml version='1.0' encoding='UTF-8'?>\n"
|
|
||||||
"<entry xmlns=\"http://www.w3.org/2005/Atom\" xmlns:docs=\"http://schemas.google.com/docs/2007\">"
|
|
||||||
"<category scheme=\"http://schemas.google.com/g/2005#kind\" "
|
|
||||||
"term=\"http://schemas.google.com/docs/2007#%1%\"/>"
|
|
||||||
"<title>%2%</title>"
|
|
||||||
"</entry>" ;
|
|
||||||
|
|
||||||
Syncer1::Syncer1( http::Agent *http ):
|
|
||||||
Syncer( http )
|
|
||||||
{
|
|
||||||
assert( http != 0 ) ;
|
|
||||||
}
|
|
||||||
|
|
||||||
void Syncer1::DeleteRemote( Resource *res )
|
|
||||||
{
|
|
||||||
http::StringResponse str ;
|
|
||||||
|
|
||||||
try
|
|
||||||
{
|
|
||||||
http::Header hdr ;
|
|
||||||
hdr.Add( "If-Match: " + res->ETag() ) ;
|
|
||||||
|
|
||||||
// don't know why, but an update before deleting seems to work always
|
|
||||||
http::XmlResponse xml ;
|
|
||||||
m_http->Get( res->SelfHref(), &xml, hdr ) ;
|
|
||||||
AssignIDs( res, Entry1( xml.Response() ) ) ;
|
|
||||||
|
|
||||||
m_http->Request( "DELETE", res->SelfHref(), NULL, &str, hdr ) ;
|
|
||||||
}
|
|
||||||
catch ( Exception& e )
|
|
||||||
{
|
|
||||||
// don't rethrow here. there are some cases that I don't know why
|
|
||||||
// the delete will fail.
|
|
||||||
Trace( "Exception %1% %2%",
|
|
||||||
boost::diagnostic_information(e),
|
|
||||||
str.Response() ) ;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool Syncer1::EditContent( Resource *res, bool new_rev )
|
|
||||||
{
|
|
||||||
assert( res->Parent() ) ;
|
|
||||||
assert( res->Parent()->GetState() == Resource::sync ) ;
|
|
||||||
|
|
||||||
if ( !res->IsEditable() )
|
|
||||||
{
|
|
||||||
Log( "Cannot upload %1%: file read-only. %2%", res->Name(), res->StateStr(), log::warning ) ;
|
|
||||||
return false ;
|
|
||||||
}
|
|
||||||
|
|
||||||
return Upload( res, feed_base + "/" + res->ResourceID() + ( new_rev ? "?new-revision=true" : "" ), false ) ;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool Syncer1::Create( Resource *res )
|
|
||||||
{
|
|
||||||
assert( res->Parent() ) ;
|
|
||||||
assert( res->Parent()->IsFolder() ) ;
|
|
||||||
assert( res->Parent()->GetState() == Resource::sync ) ;
|
|
||||||
|
|
||||||
if ( res->IsFolder() )
|
|
||||||
{
|
|
||||||
std::string uri = feed_base ;
|
|
||||||
if ( !res->Parent()->IsRoot() )
|
|
||||||
uri += ( "/" + m_http->Escape( res->Parent()->ResourceID() ) + "/contents" ) ;
|
|
||||||
|
|
||||||
std::string meta = (boost::format( xml_meta )
|
|
||||||
% "folder"
|
|
||||||
% xml::Escape( res->Name() )
|
|
||||||
).str() ;
|
|
||||||
|
|
||||||
http::Header hdr ;
|
|
||||||
hdr.Add( "Content-Type: application/atom+xml" ) ;
|
|
||||||
|
|
||||||
http::XmlResponse xml ;
|
|
||||||
m_http->Post( uri, meta, &xml, hdr ) ;
|
|
||||||
AssignIDs( res, Entry1( xml.Response() ) ) ;
|
|
||||||
|
|
||||||
return true ;
|
|
||||||
}
|
|
||||||
else if ( res->Parent()->IsEditable() )
|
|
||||||
{
|
|
||||||
return Upload( res, root_create + (res->Parent()->ResourceID() == "folder:root"
|
|
||||||
? "" : "/" + res->Parent()->ResourceID() + "/contents") + "?convert=false", true ) ;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
Log( "parent of %1% does not exist: cannot upload", res->Name(), log::warning ) ;
|
|
||||||
return false ;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool Syncer1::Upload( Resource *res,
|
|
||||||
const std::string& link,
|
|
||||||
bool post )
|
|
||||||
{
|
|
||||||
File file( res->Path() ) ;
|
|
||||||
std::ostringstream xcontent_len ;
|
|
||||||
xcontent_len << "X-Upload-Content-Length: " << file.Size() ;
|
|
||||||
|
|
||||||
http::Header hdr ;
|
|
||||||
hdr.Add( "Content-Type: application/atom+xml" ) ;
|
|
||||||
hdr.Add( "X-Upload-Content-Type: application/octet-stream" ) ;
|
|
||||||
hdr.Add( xcontent_len.str() ) ;
|
|
||||||
hdr.Add( "If-Match: " + res->ETag() ) ;
|
|
||||||
hdr.Add( "Expect:" ) ;
|
|
||||||
|
|
||||||
std::string meta = (boost::format( xml_meta )
|
|
||||||
% res->Kind()
|
|
||||||
% xml::Escape( res->Name() )
|
|
||||||
).str() ;
|
|
||||||
|
|
||||||
bool retrying = false;
|
|
||||||
while ( true )
|
|
||||||
{
|
|
||||||
if ( retrying )
|
|
||||||
{
|
|
||||||
file.Seek( 0, SEEK_SET );
|
|
||||||
os::Sleep( 5 );
|
|
||||||
}
|
|
||||||
|
|
||||||
try
|
|
||||||
{
|
|
||||||
http::StringResponse str ;
|
|
||||||
if ( post )
|
|
||||||
m_http->Post( link, meta, &str, hdr ) ;
|
|
||||||
else
|
|
||||||
m_http->Put( link, meta, &str, hdr ) ;
|
|
||||||
}
|
|
||||||
catch ( Exception &e )
|
|
||||||
{
|
|
||||||
std::string const *info = boost::get_error_info<xml::TreeBuilder::ExpatApiError>(e);
|
|
||||||
if ( info && (*info == "XML_Parse") )
|
|
||||||
{
|
|
||||||
Log( "Error parsing pre-upload response XML, retrying whole upload in 5s",
|
|
||||||
log::warning );
|
|
||||||
retrying = true;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
throw e;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
http::Header uphdr ;
|
|
||||||
uphdr.Add( "Expect:" ) ;
|
|
||||||
uphdr.Add( "Accept:" ) ;
|
|
||||||
|
|
||||||
// the content upload URL is in the "Location" HTTP header
|
|
||||||
std::string uplink = m_http->RedirLocation() ;
|
|
||||||
http::XmlResponse xml ;
|
|
||||||
|
|
||||||
long http_code = 0;
|
|
||||||
try
|
|
||||||
{
|
|
||||||
http_code = m_http->Put( uplink, &file, &xml, uphdr ) ;
|
|
||||||
}
|
|
||||||
catch ( Exception &e )
|
|
||||||
{
|
|
||||||
std::string const *info = boost::get_error_info<xml::TreeBuilder::ExpatApiError>(e);
|
|
||||||
if ( info && (*info == "XML_Parse") )
|
|
||||||
{
|
|
||||||
Log( "Error parsing response XML, retrying whole upload in 5s",
|
|
||||||
log::warning );
|
|
||||||
retrying = true;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
throw e;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( http_code == 410 || http_code == 412 )
|
|
||||||
{
|
|
||||||
Log( "request failed with %1%, body: %2%, retrying whole upload in 5s", http_code, m_http->LastError(), log::warning ) ;
|
|
||||||
retrying = true;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( retrying )
|
|
||||||
Log( "upload succeeded on retry", log::warning );
|
|
||||||
Entry1 responseEntry = Entry1( xml.Response() );
|
|
||||||
AssignIDs( res, responseEntry ) ;
|
|
||||||
AssignMTime( res, responseEntry.MTime() );
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return true ;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::auto_ptr<Feed> Syncer1::GetFolders()
|
|
||||||
{
|
|
||||||
return std::auto_ptr<Feed>( new Feed1( feed_base + "/-/folder?max-results=50&showroot=true" ) );
|
|
||||||
}
|
|
||||||
|
|
||||||
std::auto_ptr<Feed> Syncer1::GetAll()
|
|
||||||
{
|
|
||||||
return std::auto_ptr<Feed>( new Feed1( feed_base + "?showfolders=true&showroot=true" ) );
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string ChangesFeed( int changestamp )
|
|
||||||
{
|
|
||||||
boost::format feed( feed_changes + "?start-index=%1%" ) ;
|
|
||||||
return changestamp > 0 ? ( feed % changestamp ).str() : feed_changes ;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::auto_ptr<Feed> Syncer1::GetChanges( long min_cstamp )
|
|
||||||
{
|
|
||||||
return std::auto_ptr<Feed>( new Feed1( ChangesFeed( min_cstamp ) ) );
|
|
||||||
}
|
|
||||||
|
|
||||||
long Syncer1::GetChangeStamp( long min_cstamp )
|
|
||||||
{
|
|
||||||
http::XmlResponse xrsp ;
|
|
||||||
m_http->Get( ChangesFeed( min_cstamp ), &xrsp, http::Header() ) ;
|
|
||||||
|
|
||||||
return std::atoi( xrsp.Response()["docs:largestChangestamp"]["@value"].front().Value().c_str() );
|
|
||||||
}
|
|
||||||
|
|
||||||
} } // end of namespace gr::v1
|
|
|
@ -1,52 +0,0 @@
|
||||||
/*
|
|
||||||
Syncer implementation for the old "Document List" Google Docs API
|
|
||||||
Copyright (C) 2012 Wan Wai Ho, (C) 2015 Vitaliy Filippov
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "base/Syncer.hh"
|
|
||||||
|
|
||||||
namespace gr {
|
|
||||||
|
|
||||||
class Feed;
|
|
||||||
|
|
||||||
namespace v1 {
|
|
||||||
|
|
||||||
class Syncer1: public Syncer
|
|
||||||
{
|
|
||||||
|
|
||||||
public :
|
|
||||||
|
|
||||||
Syncer1( http::Agent *http );
|
|
||||||
|
|
||||||
void DeleteRemote( Resource *res );
|
|
||||||
bool EditContent( Resource *res, bool new_rev );
|
|
||||||
bool Create( Resource *res );
|
|
||||||
|
|
||||||
std::auto_ptr<Feed> GetFolders();
|
|
||||||
std::auto_ptr<Feed> GetAll();
|
|
||||||
std::auto_ptr<Feed> GetChanges( long min_cstamp );
|
|
||||||
long GetChangeStamp( long min_cstamp );
|
|
||||||
|
|
||||||
private :
|
|
||||||
|
|
||||||
bool Upload( Resource *res, const std::string& link, bool post);
|
|
||||||
|
|
||||||
} ;
|
|
||||||
|
|
||||||
} } // end of namespace gr::v1
|
|
|
@ -44,6 +44,7 @@ void Entry2::Update( const Val& item )
|
||||||
// changestamp only appears in change feed entries
|
// changestamp only appears in change feed entries
|
||||||
m_change_stamp = is_chg ? item["id"].Int() : -1 ;
|
m_change_stamp = is_chg ? item["id"].Int() : -1 ;
|
||||||
m_is_removed = is_chg && item["deleted"].Bool() ;
|
m_is_removed = is_chg && item["deleted"].Bool() ;
|
||||||
|
m_size = 0 ;
|
||||||
|
|
||||||
const Val& file = is_chg && !m_is_removed ? item["file"] : item;
|
const Val& file = is_chg && !m_is_removed ? item["file"] : item;
|
||||||
|
|
||||||
|
@ -75,6 +76,7 @@ void Entry2::Update( const Val& item )
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
m_md5 = file["md5Checksum"] ;
|
m_md5 = file["md5Checksum"] ;
|
||||||
|
m_size = file["fileSize"].U64() ;
|
||||||
m_content_src = file["downloadUrl"] ;
|
m_content_src = file["downloadUrl"] ;
|
||||||
// convert to lower case for easy comparison
|
// convert to lower case for easy comparison
|
||||||
std::transform( m_md5.begin(), m_md5.end(), m_md5.begin(), tolower ) ;
|
std::transform( m_md5.begin(), m_md5.end(), m_md5.begin(), tolower ) ;
|
||||||
|
|
|
@ -36,13 +36,17 @@ Feed2::Feed2( const std::string& url ):
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Feed2::~Feed2()
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
bool Feed2::GetNext( http::Agent *http )
|
bool Feed2::GetNext( http::Agent *http )
|
||||||
{
|
{
|
||||||
if ( m_next.empty() )
|
if ( m_next.empty() )
|
||||||
return false ;
|
return false ;
|
||||||
|
|
||||||
http::ValResponse out ;
|
http::ValResponse out ;
|
||||||
http->Get( m_next, &out, http::Header() ) ;
|
http->Get( m_next, &out, http::Header(), 0 ) ;
|
||||||
Val m_content = out.Response() ;
|
Val m_content = out.Response() ;
|
||||||
|
|
||||||
Val::Array items = m_content["items"].AsArray() ;
|
Val::Array items = m_content["items"].AsArray() ;
|
||||||
|
|
|
@ -31,6 +31,7 @@ class Feed2: public Feed
|
||||||
{
|
{
|
||||||
public :
|
public :
|
||||||
Feed2( const std::string& url ) ;
|
Feed2( const std::string& url ) ;
|
||||||
|
~Feed2() ;
|
||||||
bool GetNext( http::Agent *http ) ;
|
bool GetNext( http::Agent *http ) ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
|
|
|
@ -70,7 +70,7 @@ bool Syncer2::EditContent( Resource *res, bool new_rev )
|
||||||
return false ;
|
return false ;
|
||||||
}
|
}
|
||||||
|
|
||||||
return Upload( res ) ;
|
return Upload( res, new_rev ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Syncer2::Create( Resource *res )
|
bool Syncer2::Create( Resource *res )
|
||||||
|
@ -86,7 +86,7 @@ bool Syncer2::Create( Resource *res )
|
||||||
return false ;
|
return false ;
|
||||||
}
|
}
|
||||||
|
|
||||||
return Upload( res );
|
return Upload( res, false );
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Syncer2::Move( Resource* res, Resource* newParentRes, std::string newFilename )
|
bool Syncer2::Move( Resource* res, Resource* newParentRes, std::string newFilename )
|
||||||
|
@ -121,12 +121,15 @@ bool Syncer2::Move( Resource* res, Resource* newParentRes, std::string newFilena
|
||||||
http::Header hdr2 ;
|
http::Header hdr2 ;
|
||||||
hdr2.Add( "Content-Type: application/json" );
|
hdr2.Add( "Content-Type: application/json" );
|
||||||
http::ValResponse vrsp ;
|
http::ValResponse vrsp ;
|
||||||
long http_code = 0;
|
|
||||||
// Don't change modified date because we're only moving
|
// Don't change modified date because we're only moving
|
||||||
http_code = m_http->Put( feeds::files + "/" + res->ResourceID() + "?modifiedDateBehavior=noChange" + addRemoveParents, json_meta, &vrsp, hdr2 ) ;
|
long http_code = m_http->Put(
|
||||||
|
feeds::files + "/" + res->ResourceID() + "?modifiedDateBehavior=noChange" + addRemoveParents,
|
||||||
|
json_meta, &vrsp, hdr2
|
||||||
|
) ;
|
||||||
valr = vrsp.Response();
|
valr = vrsp.Response();
|
||||||
assert( !( valr["id"].Str().empty() ) );
|
assert( http_code == 200 && !( valr["id"].Str().empty() ) );
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -137,7 +140,7 @@ std::string to_string( uint64_t n )
|
||||||
return s.str();
|
return s.str();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Syncer2::Upload( Resource *res )
|
bool Syncer2::Upload( Resource *res, bool new_rev )
|
||||||
{
|
{
|
||||||
Val meta;
|
Val meta;
|
||||||
meta.Add( "title", Val( res->Name() ) );
|
meta.Add( "title", Val( res->Name() ) );
|
||||||
|
@ -167,7 +170,7 @@ bool Syncer2::Upload( Resource *res )
|
||||||
else
|
else
|
||||||
http_code = m_http->Put( feeds::files + "/" + res->ResourceID(), json_meta, &vrsp, hdr2 ) ;
|
http_code = m_http->Put( feeds::files + "/" + res->ResourceID(), json_meta, &vrsp, hdr2 ) ;
|
||||||
valr = vrsp.Response();
|
valr = vrsp.Response();
|
||||||
assert( !( valr["id"].Str().empty() ) );
|
assert( http_code == 200 && !( valr["id"].Str().empty() ) );
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
@ -193,7 +196,8 @@ bool Syncer2::Upload( Resource *res )
|
||||||
http::ValResponse vrsp;
|
http::ValResponse vrsp;
|
||||||
m_http->Request(
|
m_http->Request(
|
||||||
res->ResourceID().empty() ? "POST" : "PUT",
|
res->ResourceID().empty() ? "POST" : "PUT",
|
||||||
upload_base + ( res->ResourceID().empty() ? "" : "/" + res->ResourceID() ) + "?uploadType=multipart",
|
upload_base + ( res->ResourceID().empty() ? "" : "/" + res->ResourceID() ) +
|
||||||
|
"?uploadType=multipart&newRevision=" + ( new_rev ? "true" : "false" ),
|
||||||
&multipart, &vrsp, hdr
|
&multipart, &vrsp, hdr
|
||||||
) ;
|
) ;
|
||||||
valr = vrsp.Response() ;
|
valr = vrsp.Response() ;
|
||||||
|
@ -202,19 +206,19 @@ bool Syncer2::Upload( Resource *res )
|
||||||
|
|
||||||
Entry2 responseEntry = Entry2( valr ) ;
|
Entry2 responseEntry = Entry2( valr ) ;
|
||||||
AssignIDs( res, responseEntry ) ;
|
AssignIDs( res, responseEntry ) ;
|
||||||
AssignMTime( res, responseEntry.MTime() ) ;
|
res->SetServerTime( responseEntry.MTime() );
|
||||||
|
|
||||||
return true ;
|
return true ;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::auto_ptr<Feed> Syncer2::GetFolders()
|
std::unique_ptr<Feed> Syncer2::GetFolders()
|
||||||
{
|
{
|
||||||
return std::auto_ptr<Feed>( new Feed2( feeds::files + "?maxResults=1000&q=%27me%27+in+readers+and+trashed%3dfalse+and+mimeType%3d%27" + mime_types::folder + "%27" ) );
|
return std::unique_ptr<Feed>( new Feed2( feeds::files + "?maxResults=100000&q=trashed%3dfalse+and+mimeType%3d%27" + mime_types::folder + "%27" ) );
|
||||||
}
|
}
|
||||||
|
|
||||||
std::auto_ptr<Feed> Syncer2::GetAll()
|
std::unique_ptr<Feed> Syncer2::GetAll()
|
||||||
{
|
{
|
||||||
return std::auto_ptr<Feed>( new Feed2( feeds::files + "?maxResults=1000&q=%27me%27+in+readers+and+trashed%3dfalse" ) );
|
return std::unique_ptr<Feed>( new Feed2( feeds::files + "?maxResults=999999999&q=trashed%3dfalse" ) );
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string ChangesFeed( long changestamp, int maxResults = 1000 )
|
std::string ChangesFeed( long changestamp, int maxResults = 1000 )
|
||||||
|
@ -223,15 +227,15 @@ std::string ChangesFeed( long changestamp, int maxResults = 1000 )
|
||||||
return ( changestamp > 0 ? feed % maxResults % changestamp : feed % maxResults ).str() ;
|
return ( changestamp > 0 ? feed % maxResults % changestamp : feed % maxResults ).str() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::auto_ptr<Feed> Syncer2::GetChanges( long min_cstamp )
|
std::unique_ptr<Feed> Syncer2::GetChanges( long min_cstamp )
|
||||||
{
|
{
|
||||||
return std::auto_ptr<Feed>( new Feed2( ChangesFeed( min_cstamp ) ) );
|
return std::unique_ptr<Feed>( new Feed2( ChangesFeed( min_cstamp ) ) );
|
||||||
}
|
}
|
||||||
|
|
||||||
long Syncer2::GetChangeStamp( long min_cstamp )
|
long Syncer2::GetChangeStamp( long min_cstamp )
|
||||||
{
|
{
|
||||||
http::ValResponse res ;
|
http::ValResponse res ;
|
||||||
m_http->Get( ChangesFeed( min_cstamp, 1 ), &res, http::Header() ) ;
|
m_http->Get( ChangesFeed( min_cstamp, 1 ), &res, http::Header(), 0 ) ;
|
||||||
|
|
||||||
return std::atoi( res.Response()["largestChangeId"].Str().c_str() );
|
return std::atoi( res.Response()["largestChangeId"].Str().c_str() );
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,14 +39,14 @@ public :
|
||||||
bool Create( Resource *res );
|
bool Create( Resource *res );
|
||||||
bool Move( Resource* res, Resource* newParent, std::string newFilename );
|
bool Move( Resource* res, Resource* newParent, std::string newFilename );
|
||||||
|
|
||||||
std::auto_ptr<Feed> GetFolders();
|
std::unique_ptr<Feed> GetFolders();
|
||||||
std::auto_ptr<Feed> GetAll();
|
std::unique_ptr<Feed> GetAll();
|
||||||
std::auto_ptr<Feed> GetChanges( long min_cstamp );
|
std::unique_ptr<Feed> GetChanges( long min_cstamp );
|
||||||
long GetChangeStamp( long min_cstamp );
|
long GetChangeStamp( long min_cstamp );
|
||||||
|
|
||||||
private :
|
private :
|
||||||
|
|
||||||
bool Upload( Resource *res );
|
bool Upload( Resource *res, bool new_rev );
|
||||||
|
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
|
|
|
@ -25,6 +25,11 @@ namespace gr {
|
||||||
|
|
||||||
namespace http {
|
namespace http {
|
||||||
|
|
||||||
|
Agent::Agent()
|
||||||
|
{
|
||||||
|
mMaxUpload = mMaxDownload = 0;
|
||||||
|
}
|
||||||
|
|
||||||
long Agent::Put(
|
long Agent::Put(
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
const std::string& data,
|
const std::string& data,
|
||||||
|
@ -47,9 +52,10 @@ long Agent::Put(
|
||||||
long Agent::Get(
|
long Agent::Get(
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
const Header& hdr )
|
const Header& hdr,
|
||||||
|
u64_t downloadFileBytes )
|
||||||
{
|
{
|
||||||
return Request( "GET", url, NULL, dest, hdr );
|
return Request( "GET", url, NULL, dest, hdr, downloadFileBytes );
|
||||||
}
|
}
|
||||||
|
|
||||||
long Agent::Post(
|
long Agent::Post(
|
||||||
|
@ -64,4 +70,14 @@ long Agent::Post(
|
||||||
return Request( "POST", url, &s, dest, h );
|
return Request( "POST", url, &s, dest, h );
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Agent::SetUploadSpeed( unsigned kbytes )
|
||||||
|
{
|
||||||
|
mMaxUpload = kbytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Agent::SetDownloadSpeed( unsigned kbytes )
|
||||||
|
{
|
||||||
|
mMaxDownload = kbytes;
|
||||||
|
}
|
||||||
|
|
||||||
} } // end of namespace
|
} } // end of namespace
|
||||||
|
|
|
@ -22,6 +22,7 @@
|
||||||
#include <string>
|
#include <string>
|
||||||
#include "ResponseLog.hh"
|
#include "ResponseLog.hh"
|
||||||
#include "util/Types.hh"
|
#include "util/Types.hh"
|
||||||
|
#include "util/Progress.hh"
|
||||||
|
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
|
@ -34,7 +35,11 @@ class Header ;
|
||||||
|
|
||||||
class Agent
|
class Agent
|
||||||
{
|
{
|
||||||
|
protected:
|
||||||
|
unsigned mMaxUpload, mMaxDownload ;
|
||||||
|
|
||||||
public :
|
public :
|
||||||
|
Agent() ;
|
||||||
virtual ~Agent() {}
|
virtual ~Agent() {}
|
||||||
|
|
||||||
virtual ResponseLog* GetLog() const = 0 ;
|
virtual ResponseLog* GetLog() const = 0 ;
|
||||||
|
@ -55,7 +60,8 @@ public :
|
||||||
virtual long Get(
|
virtual long Get(
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
const Header& hdr ) ;
|
const Header& hdr,
|
||||||
|
u64_t downloadFileBytes = 0 ) ;
|
||||||
|
|
||||||
virtual long Post(
|
virtual long Post(
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
|
@ -68,7 +74,11 @@ public :
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
SeekStream *in,
|
SeekStream *in,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
const Header& hdr ) = 0 ;
|
const Header& hdr,
|
||||||
|
u64_t downloadFileBytes = 0 ) = 0 ;
|
||||||
|
|
||||||
|
virtual void SetUploadSpeed( unsigned kbytes ) ;
|
||||||
|
virtual void SetDownloadSpeed( unsigned kbytes ) ;
|
||||||
|
|
||||||
virtual std::string LastError() const = 0 ;
|
virtual std::string LastError() const = 0 ;
|
||||||
virtual std::string LastErrorHeaders() const = 0 ;
|
virtual std::string LastErrorHeaders() const = 0 ;
|
||||||
|
@ -77,6 +87,8 @@ public :
|
||||||
|
|
||||||
virtual std::string Escape( const std::string& str ) = 0 ;
|
virtual std::string Escape( const std::string& str ) = 0 ;
|
||||||
virtual std::string Unescape( const std::string& str ) = 0 ;
|
virtual std::string Unescape( const std::string& str ) = 0 ;
|
||||||
|
|
||||||
|
virtual void SetProgressReporter( Progress* ) = 0;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} } // end of namespace
|
} } // end of namespace
|
||||||
|
|
|
@ -28,14 +28,10 @@
|
||||||
|
|
||||||
#include <boost/throw_exception.hpp>
|
#include <boost/throw_exception.hpp>
|
||||||
|
|
||||||
// dependent libraries
|
|
||||||
#include <curl/curl.h>
|
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <limits>
|
#include <limits>
|
||||||
#include <sstream>
|
|
||||||
#include <streambuf>
|
#include <streambuf>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
|
@ -69,12 +65,13 @@ struct CurlAgent::Impl
|
||||||
std::string error_headers ;
|
std::string error_headers ;
|
||||||
std::string error_data ;
|
std::string error_data ;
|
||||||
DataStream *dest ;
|
DataStream *dest ;
|
||||||
|
u64_t total_download, total_upload ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
static struct curl_slist* SetHeader( CURL* handle, const Header& hdr );
|
static struct curl_slist* SetHeader( CURL* handle, const Header& hdr );
|
||||||
|
|
||||||
CurlAgent::CurlAgent() :
|
CurlAgent::CurlAgent() : Agent(),
|
||||||
m_pimpl( new Impl )
|
m_pimpl( new Impl ), m_pb( 0 )
|
||||||
{
|
{
|
||||||
m_pimpl->curl = ::curl_easy_init();
|
m_pimpl->curl = ::curl_easy_init();
|
||||||
}
|
}
|
||||||
|
@ -87,10 +84,15 @@ void CurlAgent::Init()
|
||||||
::curl_easy_setopt( m_pimpl->curl, CURLOPT_HEADERFUNCTION, &CurlAgent::HeaderCallback ) ;
|
::curl_easy_setopt( m_pimpl->curl, CURLOPT_HEADERFUNCTION, &CurlAgent::HeaderCallback ) ;
|
||||||
::curl_easy_setopt( m_pimpl->curl, CURLOPT_HEADERDATA, this ) ;
|
::curl_easy_setopt( m_pimpl->curl, CURLOPT_HEADERDATA, this ) ;
|
||||||
::curl_easy_setopt( m_pimpl->curl, CURLOPT_HEADER, 0L ) ;
|
::curl_easy_setopt( m_pimpl->curl, CURLOPT_HEADER, 0L ) ;
|
||||||
|
if ( mMaxUpload > 0 )
|
||||||
|
::curl_easy_setopt( m_pimpl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, mMaxUpload ) ;
|
||||||
|
if ( mMaxDownload > 0 )
|
||||||
|
::curl_easy_setopt( m_pimpl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, mMaxDownload ) ;
|
||||||
m_pimpl->error = false;
|
m_pimpl->error = false;
|
||||||
m_pimpl->error_headers = "";
|
m_pimpl->error_headers = "";
|
||||||
m_pimpl->error_data = "";
|
m_pimpl->error_data = "";
|
||||||
m_pimpl->dest = NULL;
|
m_pimpl->dest = NULL;
|
||||||
|
m_pimpl->total_download = m_pimpl->total_upload = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
CurlAgent::~CurlAgent()
|
CurlAgent::~CurlAgent()
|
||||||
|
@ -108,6 +110,11 @@ void CurlAgent::SetLog(ResponseLog *log)
|
||||||
m_log.reset( log );
|
m_log.reset( log );
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void CurlAgent::SetProgressReporter(Progress *progress)
|
||||||
|
{
|
||||||
|
m_pb = progress;
|
||||||
|
}
|
||||||
|
|
||||||
std::size_t CurlAgent::HeaderCallback( void *ptr, size_t size, size_t nmemb, CurlAgent *pthis )
|
std::size_t CurlAgent::HeaderCallback( void *ptr, size_t size, size_t nmemb, CurlAgent *pthis )
|
||||||
{
|
{
|
||||||
char *str = static_cast<char*>(ptr) ;
|
char *str = static_cast<char*>(ptr) ;
|
||||||
|
@ -128,7 +135,7 @@ std::size_t CurlAgent::HeaderCallback( void *ptr, size_t size, size_t nmemb, Cur
|
||||||
if ( pos != line.npos )
|
if ( pos != line.npos )
|
||||||
{
|
{
|
||||||
std::size_t end_pos = line.find( "\r\n", pos ) ;
|
std::size_t end_pos = line.find( "\r\n", pos ) ;
|
||||||
pthis->m_pimpl->location = line.substr( loc.size(), end_pos - loc.size() ) ;
|
pthis->m_pimpl->location = line.substr( pos+loc.size(), end_pos - loc.size() ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
return size*nmemb ;
|
return size*nmemb ;
|
||||||
|
@ -139,6 +146,7 @@ std::size_t CurlAgent::Receive( void* ptr, size_t size, size_t nmemb, CurlAgent
|
||||||
assert( pthis != 0 ) ;
|
assert( pthis != 0 ) ;
|
||||||
if ( pthis->m_log.get() )
|
if ( pthis->m_log.get() )
|
||||||
pthis->m_log->Write( (const char*)ptr, size*nmemb );
|
pthis->m_log->Write( (const char*)ptr, size*nmemb );
|
||||||
|
|
||||||
if ( pthis->m_pimpl->error && pthis->m_pimpl->error_data.size() < 65536 )
|
if ( pthis->m_pimpl->error && pthis->m_pimpl->error_data.size() < 65536 )
|
||||||
{
|
{
|
||||||
// Do not feed error responses to destination stream
|
// Do not feed error responses to destination stream
|
||||||
|
@ -148,6 +156,22 @@ std::size_t CurlAgent::Receive( void* ptr, size_t size, size_t nmemb, CurlAgent
|
||||||
return pthis->m_pimpl->dest->Write( static_cast<char*>(ptr), size * nmemb ) ;
|
return pthis->m_pimpl->dest->Write( static_cast<char*>(ptr), size * nmemb ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int CurlAgent::progress_callback( CurlAgent *pthis, curl_off_t totalDownload, curl_off_t finishedDownload, curl_off_t totalUpload, curl_off_t finishedUpload )
|
||||||
|
{
|
||||||
|
// Only report download progress when set explicitly
|
||||||
|
if ( pthis->m_pb )
|
||||||
|
{
|
||||||
|
totalDownload = pthis->m_pimpl->total_download;
|
||||||
|
if ( !totalUpload )
|
||||||
|
totalUpload = pthis->m_pimpl->total_upload;
|
||||||
|
pthis->m_pb->reportProgress(
|
||||||
|
totalDownload > 0 ? totalDownload : totalUpload,
|
||||||
|
totalDownload > 0 ? finishedDownload : finishedUpload
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
long CurlAgent::ExecCurl(
|
long CurlAgent::ExecCurl(
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
|
@ -165,6 +189,12 @@ long CurlAgent::ExecCurl(
|
||||||
|
|
||||||
struct curl_slist *slist = SetHeader( m_pimpl->curl, hdr ) ;
|
struct curl_slist *slist = SetHeader( m_pimpl->curl, hdr ) ;
|
||||||
|
|
||||||
|
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
|
||||||
|
#if LIBCURL_VERSION_NUM >= 0x072000
|
||||||
|
curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, progress_callback);
|
||||||
|
curl_easy_setopt(curl, CURLOPT_XFERINFODATA, this);
|
||||||
|
#endif
|
||||||
|
|
||||||
CURLcode curl_code = ::curl_easy_perform(curl);
|
CURLcode curl_code = ::curl_easy_perform(curl);
|
||||||
|
|
||||||
curl_slist_free_all(slist);
|
curl_slist_free_all(slist);
|
||||||
|
@ -199,11 +229,13 @@ long CurlAgent::Request(
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
SeekStream *in,
|
SeekStream *in,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
const Header& hdr )
|
const Header& hdr,
|
||||||
|
u64_t downloadFileBytes )
|
||||||
{
|
{
|
||||||
Trace("HTTP %1% \"%2%\"", method, url ) ;
|
Trace("HTTP %1% \"%2%\"", method, url ) ;
|
||||||
|
|
||||||
Init() ;
|
Init() ;
|
||||||
|
m_pimpl->total_download = downloadFileBytes ;
|
||||||
CURL *curl = m_pimpl->curl ;
|
CURL *curl = m_pimpl->curl ;
|
||||||
|
|
||||||
// set common options
|
// set common options
|
||||||
|
|
|
@ -24,6 +24,8 @@
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
|
#include <curl/curl.h>
|
||||||
|
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
class DataStream ;
|
class DataStream ;
|
||||||
|
@ -43,13 +45,15 @@ public :
|
||||||
|
|
||||||
ResponseLog* GetLog() const ;
|
ResponseLog* GetLog() const ;
|
||||||
void SetLog( ResponseLog *log ) ;
|
void SetLog( ResponseLog *log ) ;
|
||||||
|
void SetProgressReporter( Progress *progress ) ;
|
||||||
|
|
||||||
long Request(
|
long Request(
|
||||||
const std::string& method,
|
const std::string& method,
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
SeekStream *in,
|
SeekStream *in,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
const Header& hdr ) ;
|
const Header& hdr,
|
||||||
|
u64_t downloadFileBytes = 0 ) ;
|
||||||
|
|
||||||
std::string LastError() const ;
|
std::string LastError() const ;
|
||||||
std::string LastErrorHeaders() const ;
|
std::string LastErrorHeaders() const ;
|
||||||
|
@ -59,6 +63,8 @@ public :
|
||||||
std::string Escape( const std::string& str ) ;
|
std::string Escape( const std::string& str ) ;
|
||||||
std::string Unescape( const std::string& str ) ;
|
std::string Unescape( const std::string& str ) ;
|
||||||
|
|
||||||
|
static int progress_callback( CurlAgent *pthis, curl_off_t totalDownload, curl_off_t finishedDownload, curl_off_t totalUpload, curl_off_t finishedUpload );
|
||||||
|
|
||||||
private :
|
private :
|
||||||
static std::size_t HeaderCallback( void *ptr, size_t size, size_t nmemb, CurlAgent *pthis ) ;
|
static std::size_t HeaderCallback( void *ptr, size_t size, size_t nmemb, CurlAgent *pthis ) ;
|
||||||
static std::size_t Receive( void* ptr, size_t size, size_t nmemb, CurlAgent *pthis ) ;
|
static std::size_t Receive( void* ptr, size_t size, size_t nmemb, CurlAgent *pthis ) ;
|
||||||
|
@ -72,8 +78,9 @@ private :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
struct Impl ;
|
struct Impl ;
|
||||||
std::auto_ptr<Impl> m_pimpl ;
|
std::unique_ptr<Impl> m_pimpl ;
|
||||||
std::auto_ptr<ResponseLog> m_log ;
|
std::unique_ptr<ResponseLog> m_log ;
|
||||||
|
Progress* m_pb ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} } // end of namespace
|
} } // end of namespace
|
||||||
|
|
|
@ -20,7 +20,6 @@
|
||||||
#include "Download.hh"
|
#include "Download.hh"
|
||||||
// #include "util/SignalHandler.hh"
|
// #include "util/SignalHandler.hh"
|
||||||
|
|
||||||
#include "Error.hh"
|
|
||||||
#include "util/Crypt.hh"
|
#include "util/Crypt.hh"
|
||||||
|
|
||||||
// boost headers
|
// boost headers
|
||||||
|
|
|
@ -48,7 +48,7 @@ public :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
File m_file ;
|
File m_file ;
|
||||||
std::auto_ptr<crypt::MD5> m_crypt ;
|
std::unique_ptr<crypt::MD5> m_crypt ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} } // end of namespace
|
} } // end of namespace
|
||||||
|
|
|
@ -22,6 +22,7 @@
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <iterator>
|
#include <iterator>
|
||||||
#include <ostream>
|
#include <ostream>
|
||||||
|
#include <sstream>
|
||||||
|
|
||||||
namespace gr { namespace http {
|
namespace gr { namespace http {
|
||||||
|
|
||||||
|
@ -34,6 +35,13 @@ void Header::Add( const std::string& str )
|
||||||
m_vec.push_back( str ) ;
|
m_vec.push_back( str ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::string Header::Str() const
|
||||||
|
{
|
||||||
|
std::ostringstream s ;
|
||||||
|
s << *this ;
|
||||||
|
return s.str() ;
|
||||||
|
}
|
||||||
|
|
||||||
Header::iterator Header::begin() const
|
Header::iterator Header::begin() const
|
||||||
{
|
{
|
||||||
return m_vec.begin() ;
|
return m_vec.begin() ;
|
||||||
|
|
|
@ -37,6 +37,7 @@ public :
|
||||||
Header() ;
|
Header() ;
|
||||||
|
|
||||||
void Add( const std::string& str ) ;
|
void Add( const std::string& str ) ;
|
||||||
|
std::string Str() const ;
|
||||||
|
|
||||||
iterator begin() const ;
|
iterator begin() const ;
|
||||||
iterator end() const ;
|
iterator end() const ;
|
||||||
|
|
|
@ -20,15 +20,10 @@
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "util/DataStream.hh"
|
#include "util/DataStream.hh"
|
||||||
|
#include "xml/TreeBuilder.hh"
|
||||||
|
|
||||||
#include <memory>
|
#include <memory>
|
||||||
|
|
||||||
namespace gr { namespace xml
|
|
||||||
{
|
|
||||||
class Node ;
|
|
||||||
class TreeBuilder ;
|
|
||||||
} }
|
|
||||||
|
|
||||||
namespace gr { namespace http {
|
namespace gr { namespace http {
|
||||||
|
|
||||||
class XmlResponse : public DataStream
|
class XmlResponse : public DataStream
|
||||||
|
@ -44,7 +39,7 @@ public :
|
||||||
xml::Node Response() const ;
|
xml::Node Response() const ;
|
||||||
|
|
||||||
private :
|
private :
|
||||||
std::auto_ptr<xml::TreeBuilder> m_tb ;
|
std::unique_ptr<xml::TreeBuilder> m_tb ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} } // end of namespace
|
} } // end of namespace
|
||||||
|
|
|
@ -50,7 +50,7 @@ public :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
struct Impl ;
|
struct Impl ;
|
||||||
std::auto_ptr<Impl> m_impl ;
|
std::unique_ptr<Impl> m_impl ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} // end of namespace
|
} // end of namespace
|
||||||
|
|
|
@ -51,7 +51,7 @@ private :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
struct Impl ;
|
struct Impl ;
|
||||||
std::auto_ptr<Impl> m_impl ;
|
std::unique_ptr<Impl> m_impl ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
std::string WriteJson( const Val& val );
|
std::string WriteJson( const Val& val );
|
||||||
|
|
|
@ -91,6 +91,18 @@ const Val& Val::operator[]( const std::string& key ) const
|
||||||
throw ;
|
throw ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Val& Val::operator[]( const std::string& key )
|
||||||
|
{
|
||||||
|
Object& obj = As<Object>() ;
|
||||||
|
Object::iterator i = obj.find(key) ;
|
||||||
|
if ( i != obj.end() )
|
||||||
|
return i->second ;
|
||||||
|
|
||||||
|
// shut off compiler warning
|
||||||
|
BOOST_THROW_EXCEPTION(Error() << NoKey_(key)) ;
|
||||||
|
throw ;
|
||||||
|
}
|
||||||
|
|
||||||
const Val& Val::operator[]( std::size_t index ) const
|
const Val& Val::operator[]( std::size_t index ) const
|
||||||
{
|
{
|
||||||
const Array& ar = As<Array>() ;
|
const Array& ar = As<Array>() ;
|
||||||
|
@ -104,12 +116,14 @@ const Val& Val::operator[]( std::size_t index ) const
|
||||||
|
|
||||||
std::string Val::Str() const
|
std::string Val::Str() const
|
||||||
{
|
{
|
||||||
|
if ( Type() == int_type )
|
||||||
|
return boost::to_string( As<long long>() );
|
||||||
return As<std::string>() ;
|
return As<std::string>() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
Val::operator std::string() const
|
Val::operator std::string() const
|
||||||
{
|
{
|
||||||
return As<std::string>() ;
|
return Str();
|
||||||
}
|
}
|
||||||
|
|
||||||
int Val::Int() const
|
int Val::Int() const
|
||||||
|
@ -119,6 +133,13 @@ int Val::Int() const
|
||||||
return static_cast<int>(As<long long>()) ;
|
return static_cast<int>(As<long long>()) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
unsigned long long Val::U64() const
|
||||||
|
{
|
||||||
|
if ( Type() == string_type )
|
||||||
|
return strtoull( As<std::string>().c_str(), NULL, 10 );
|
||||||
|
return static_cast<unsigned long long>(As<long long>()) ;
|
||||||
|
}
|
||||||
|
|
||||||
double Val::Double() const
|
double Val::Double() const
|
||||||
{
|
{
|
||||||
if ( Type() == string_type )
|
if ( Type() == string_type )
|
||||||
|
@ -136,17 +157,38 @@ const Val::Array& Val::AsArray() const
|
||||||
return As<Array>() ;
|
return As<Array>() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Val::Array& Val::AsArray()
|
||||||
|
{
|
||||||
|
return As<Array>() ;
|
||||||
|
}
|
||||||
|
|
||||||
const Val::Object& Val::AsObject() const
|
const Val::Object& Val::AsObject() const
|
||||||
{
|
{
|
||||||
return As<Object>() ;
|
return As<Object>() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Val::Object& Val::AsObject()
|
||||||
|
{
|
||||||
|
return As<Object>() ;
|
||||||
|
}
|
||||||
|
|
||||||
bool Val::Has( const std::string& key ) const
|
bool Val::Has( const std::string& key ) const
|
||||||
{
|
{
|
||||||
const Object& obj = As<Object>() ;
|
const Object& obj = As<Object>() ;
|
||||||
return obj.find(key) != obj.end() ;
|
return obj.find(key) != obj.end() ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool Val::Del( const std::string& key )
|
||||||
|
{
|
||||||
|
Object& obj = As<Object>() ;
|
||||||
|
return obj.erase(key) > 0 ;
|
||||||
|
}
|
||||||
|
|
||||||
|
Val& Val::Item( const std::string& key )
|
||||||
|
{
|
||||||
|
return As<Object>()[key];
|
||||||
|
}
|
||||||
|
|
||||||
bool Val::Get( const std::string& key, Val& val ) const
|
bool Val::Get( const std::string& key, Val& val ) const
|
||||||
{
|
{
|
||||||
const Object& obj = As<Object>() ;
|
const Object& obj = As<Object>() ;
|
||||||
|
@ -165,6 +207,16 @@ void Val::Add( const std::string& key, const Val& value )
|
||||||
As<Object>().insert( std::make_pair(key, value) ) ;
|
As<Object>().insert( std::make_pair(key, value) ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Val::Set( const std::string& key, const Val& value )
|
||||||
|
{
|
||||||
|
Object& obj = As<Object>();
|
||||||
|
Object::iterator i = obj.find(key);
|
||||||
|
if (i == obj.end())
|
||||||
|
obj.insert(std::make_pair(key, value));
|
||||||
|
else
|
||||||
|
i->second = value;
|
||||||
|
}
|
||||||
|
|
||||||
void Val::Add( const Val& json )
|
void Val::Add( const Val& json )
|
||||||
{
|
{
|
||||||
As<Array>().push_back( json ) ;
|
As<Array>().push_back( json ) ;
|
||||||
|
|
|
@ -94,24 +94,29 @@ public :
|
||||||
|
|
||||||
TypeEnum Type() const ;
|
TypeEnum Type() const ;
|
||||||
|
|
||||||
const Val& operator[]( const std::string& key ) const ;
|
|
||||||
const Val& operator[]( std::size_t index ) const ;
|
|
||||||
|
|
||||||
// shortcuts for As<>()
|
// shortcuts for As<>()
|
||||||
std::string Str() const ;
|
std::string Str() const ;
|
||||||
int Int() const ;
|
int Int() const ;
|
||||||
long Long() const ;
|
unsigned long long U64() const ;
|
||||||
double Double() const ;
|
double Double() const ;
|
||||||
bool Bool() const ;
|
bool Bool() const ;
|
||||||
const Array& AsArray() const ;
|
const Array& AsArray() const ;
|
||||||
|
Array& AsArray() ;
|
||||||
const Object& AsObject() const ;
|
const Object& AsObject() const ;
|
||||||
|
Object& AsObject() ;
|
||||||
|
|
||||||
// shortcuts for objects
|
// shortcuts for objects
|
||||||
bool Has( const std::string& key ) const ;
|
Val& operator[]( const std::string& key ) ; // get updatable ref or throw
|
||||||
bool Get( const std::string& key, Val& val ) const ;
|
const Val& operator[]( const std::string& key ) const ; // get const ref or throw
|
||||||
void Add( const std::string& key, const Val& val ) ;
|
Val& Item( const std::string& key ) ; // insert if not exists and get
|
||||||
|
bool Has( const std::string& key ) const ; // check if exists
|
||||||
|
bool Get( const std::string& key, Val& val ) const ; // get or return false
|
||||||
|
void Add( const std::string& key, const Val& val ) ; // insert or do nothing
|
||||||
|
void Set( const std::string& key, const Val& val ) ; // insert or update
|
||||||
|
bool Del( const std::string& key ); // delete or do nothing
|
||||||
|
|
||||||
// shortcuts for array (and array of objects)
|
// shortcuts for array (and array of objects)
|
||||||
|
const Val& operator[]( std::size_t index ) const ;
|
||||||
void Add( const Val& json ) ;
|
void Add( const Val& json ) ;
|
||||||
|
|
||||||
std::vector<Val> Select( const std::string& key ) const ;
|
std::vector<Val> Select( const std::string& key ) const ;
|
||||||
|
@ -125,7 +130,7 @@ private :
|
||||||
template <typename T>
|
template <typename T>
|
||||||
struct Impl ;
|
struct Impl ;
|
||||||
|
|
||||||
std::auto_ptr<Base> m_base ;
|
std::unique_ptr<Base> m_base ;
|
||||||
|
|
||||||
private :
|
private :
|
||||||
void Select( const Object& obj, const std::string& key, std::vector<Val>& result ) const ;
|
void Select( const Object& obj, const std::string& key, std::vector<Val>& result ) const ;
|
||||||
|
@ -189,35 +194,29 @@ Val& Val::Assign( const T& t )
|
||||||
template <typename T>
|
template <typename T>
|
||||||
const T& Val::As() const
|
const T& Val::As() const
|
||||||
{
|
{
|
||||||
try
|
const Impl<T> *impl = dynamic_cast<const Impl<T> *>( m_base.get() ) ;
|
||||||
{
|
if ( !impl )
|
||||||
const Impl<T> *impl = &dynamic_cast<const Impl<T>&>( *m_base ) ;
|
|
||||||
return impl->val ;
|
|
||||||
}
|
|
||||||
catch ( std::exception& e )
|
|
||||||
{
|
{
|
||||||
TypeEnum dest = Type2Enum<T>::type ;
|
TypeEnum dest = Type2Enum<T>::type ;
|
||||||
BOOST_THROW_EXCEPTION(
|
BOOST_THROW_EXCEPTION(
|
||||||
Error() << SrcType_( Type() ) << DestType_( dest )
|
Error() << SrcType_( Type() ) << DestType_( dest )
|
||||||
) ;
|
) ;
|
||||||
}
|
}
|
||||||
|
return impl->val ;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
T& Val::As()
|
T& Val::As()
|
||||||
{
|
{
|
||||||
try
|
Impl<T> *impl = dynamic_cast<Impl<T> *>( m_base.get() ) ;
|
||||||
{
|
if ( !impl )
|
||||||
Impl<T> *impl = &dynamic_cast<Impl<T>&>( *m_base ) ;
|
|
||||||
return impl->val ;
|
|
||||||
}
|
|
||||||
catch ( std::exception& e )
|
|
||||||
{
|
{
|
||||||
TypeEnum dest = Type2Enum<T>::type ;
|
TypeEnum dest = Type2Enum<T>::type ;
|
||||||
BOOST_THROW_EXCEPTION(
|
BOOST_THROW_EXCEPTION(
|
||||||
Error() << SrcType_( Type() ) << DestType_( dest )
|
Error() << SrcType_( Type() ) << DestType_( dest )
|
||||||
) ;
|
) ;
|
||||||
}
|
}
|
||||||
|
return impl->val ;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
|
|
|
@ -32,6 +32,7 @@ namespace gr {
|
||||||
using namespace http ;
|
using namespace http ;
|
||||||
|
|
||||||
AuthAgent::AuthAgent( OAuth2& auth, Agent *real_agent ) :
|
AuthAgent::AuthAgent( OAuth2& auth, Agent *real_agent ) :
|
||||||
|
Agent(),
|
||||||
m_auth ( auth ),
|
m_auth ( auth ),
|
||||||
m_agent ( real_agent )
|
m_agent ( real_agent )
|
||||||
{
|
{
|
||||||
|
@ -47,6 +48,21 @@ void AuthAgent::SetLog( http::ResponseLog *log )
|
||||||
return m_agent->SetLog( log );
|
return m_agent->SetLog( log );
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void AuthAgent::SetProgressReporter( Progress *progress )
|
||||||
|
{
|
||||||
|
m_agent->SetProgressReporter( progress );
|
||||||
|
}
|
||||||
|
|
||||||
|
void AuthAgent::SetUploadSpeed( unsigned kbytes )
|
||||||
|
{
|
||||||
|
m_agent->SetUploadSpeed( kbytes );
|
||||||
|
}
|
||||||
|
|
||||||
|
void AuthAgent::SetDownloadSpeed( unsigned kbytes )
|
||||||
|
{
|
||||||
|
m_agent->SetDownloadSpeed( kbytes );
|
||||||
|
}
|
||||||
|
|
||||||
http::Header AuthAgent::AppendHeader( const http::Header& hdr ) const
|
http::Header AuthAgent::AppendHeader( const http::Header& hdr ) const
|
||||||
{
|
{
|
||||||
http::Header h(hdr) ;
|
http::Header h(hdr) ;
|
||||||
|
@ -60,16 +76,18 @@ long AuthAgent::Request(
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
SeekStream *in,
|
SeekStream *in,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
const http::Header& hdr )
|
const http::Header& hdr,
|
||||||
|
u64_t downloadFileBytes )
|
||||||
{
|
{
|
||||||
long response;
|
long response;
|
||||||
Header auth;
|
Header auth;
|
||||||
|
m_interval = 0;
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
auth = AppendHeader( hdr );
|
auth = AppendHeader( hdr );
|
||||||
if ( in )
|
if ( in )
|
||||||
in->Seek( 0, 0 );
|
in->Seek( 0, 0 );
|
||||||
response = m_agent->Request( method, url, in, dest, auth );
|
response = m_agent->Request( method, url, in, dest, auth, downloadFileBytes );
|
||||||
} while ( CheckRetry( response ) );
|
} while ( CheckRetry( response ) );
|
||||||
return CheckHttpResponse( response, url, auth );
|
return CheckHttpResponse( response, url, auth );
|
||||||
}
|
}
|
||||||
|
@ -110,7 +128,17 @@ bool AuthAgent::CheckRetry( long response )
|
||||||
os::Sleep( 5 ) ;
|
os::Sleep( 5 ) ;
|
||||||
return true ;
|
return true ;
|
||||||
}
|
}
|
||||||
|
// HTTP 403 is the result of API rate limiting. attempt exponential backoff and try again
|
||||||
|
else if ( response == 429 || ( response == 403 && (
|
||||||
|
m_agent->LastError().find("\"reason\": \"userRateLimitExceeded\",") != std::string::npos ||
|
||||||
|
m_agent->LastError().find("\"reason\": \"rateLimitExceeded\",") != std::string::npos ) ) )
|
||||||
|
{
|
||||||
|
m_interval = m_interval <= 0 ? 1 : ( m_interval < 64 ? m_interval*2 : 120 );
|
||||||
|
Log( "request failed due to rate limiting: %1% (body: %2%). retrying in %3% seconds",
|
||||||
|
response, m_agent->LastError(), m_interval, log::warning ) ;
|
||||||
|
os::Sleep( m_interval ) ;
|
||||||
|
return true ;
|
||||||
|
}
|
||||||
// HTTP 401 Unauthorized. the auth token has been expired. refresh it
|
// HTTP 401 Unauthorized. the auth token has been expired. refresh it
|
||||||
else if ( response == 401 )
|
else if ( response == 401 )
|
||||||
{
|
{
|
||||||
|
|
|
@ -44,7 +44,8 @@ public :
|
||||||
const std::string& url,
|
const std::string& url,
|
||||||
SeekStream *in,
|
SeekStream *in,
|
||||||
DataStream *dest,
|
DataStream *dest,
|
||||||
const http::Header& hdr ) ;
|
const http::Header& hdr,
|
||||||
|
u64_t downloadFileBytes = 0 ) ;
|
||||||
|
|
||||||
std::string LastError() const ;
|
std::string LastError() const ;
|
||||||
std::string LastErrorHeaders() const ;
|
std::string LastErrorHeaders() const ;
|
||||||
|
@ -54,6 +55,11 @@ public :
|
||||||
std::string Escape( const std::string& str ) ;
|
std::string Escape( const std::string& str ) ;
|
||||||
std::string Unescape( const std::string& str ) ;
|
std::string Unescape( const std::string& str ) ;
|
||||||
|
|
||||||
|
void SetUploadSpeed( unsigned kbytes ) ;
|
||||||
|
void SetDownloadSpeed( unsigned kbytes ) ;
|
||||||
|
|
||||||
|
void SetProgressReporter( Progress *progress ) ;
|
||||||
|
|
||||||
private :
|
private :
|
||||||
http::Header AppendHeader( const http::Header& hdr ) const ;
|
http::Header AppendHeader( const http::Header& hdr ) const ;
|
||||||
bool CheckRetry( long response ) ;
|
bool CheckRetry( long response ) ;
|
||||||
|
@ -65,6 +71,7 @@ private :
|
||||||
private :
|
private :
|
||||||
OAuth2& m_auth ;
|
OAuth2& m_auth ;
|
||||||
http::Agent* m_agent ;
|
http::Agent* m_agent ;
|
||||||
|
int m_interval ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} // end of namespace
|
} // end of namespace
|
||||||
|
|
|
@ -25,6 +25,13 @@
|
||||||
#include "http/Header.hh"
|
#include "http/Header.hh"
|
||||||
#include "util/log/Log.hh"
|
#include "util/log/Log.hh"
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <errno.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <poll.h>
|
||||||
|
|
||||||
// for debugging
|
// for debugging
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
|
@ -37,8 +44,8 @@ OAuth2::OAuth2(
|
||||||
const std::string& refresh_code,
|
const std::string& refresh_code,
|
||||||
const std::string& client_id,
|
const std::string& client_id,
|
||||||
const std::string& client_secret ) :
|
const std::string& client_secret ) :
|
||||||
m_agent( agent ),
|
|
||||||
m_refresh( refresh_code ),
|
m_refresh( refresh_code ),
|
||||||
|
m_agent( agent ),
|
||||||
m_client_id( client_id ),
|
m_client_id( client_id ),
|
||||||
m_client_secret( client_secret )
|
m_client_secret( client_secret )
|
||||||
{
|
{
|
||||||
|
@ -50,18 +57,29 @@ OAuth2::OAuth2(
|
||||||
const std::string& client_id,
|
const std::string& client_id,
|
||||||
const std::string& client_secret ) :
|
const std::string& client_secret ) :
|
||||||
m_agent( agent ),
|
m_agent( agent ),
|
||||||
|
m_port( 0 ),
|
||||||
|
m_socket( -1 ),
|
||||||
m_client_id( client_id ),
|
m_client_id( client_id ),
|
||||||
m_client_secret( client_secret )
|
m_client_secret( client_secret )
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
void OAuth2::Auth( const std::string& auth_code )
|
OAuth2::~OAuth2()
|
||||||
|
{
|
||||||
|
if ( m_socket >= 0 )
|
||||||
|
{
|
||||||
|
close( m_socket );
|
||||||
|
m_socket = -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool OAuth2::Auth( const std::string& auth_code )
|
||||||
{
|
{
|
||||||
std::string post =
|
std::string post =
|
||||||
"code=" + auth_code +
|
"code=" + auth_code +
|
||||||
"&client_id=" + m_client_id +
|
"&client_id=" + m_client_id +
|
||||||
"&client_secret=" + m_client_secret +
|
"&client_secret=" + m_client_secret +
|
||||||
"&redirect_uri=" + "urn:ietf:wg:oauth:2.0:oob" +
|
"&redirect_uri=http%3A%2F%2Flocalhost:" + std::to_string( m_port ) + "%2Fauth" +
|
||||||
"&grant_type=authorization_code" ;
|
"&grant_type=authorization_code" ;
|
||||||
|
|
||||||
http::ValResponse resp ;
|
http::ValResponse resp ;
|
||||||
|
@ -77,24 +95,120 @@ void OAuth2::Auth( const std::string& auth_code )
|
||||||
{
|
{
|
||||||
Log( "Failed to obtain auth token: HTTP %1%, body: %2%",
|
Log( "Failed to obtain auth token: HTTP %1%, body: %2%",
|
||||||
code, m_agent->LastError(), log::error ) ;
|
code, m_agent->LastError(), log::error ) ;
|
||||||
BOOST_THROW_EXCEPTION( AuthFailed() );
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string OAuth2::MakeAuthURL()
|
std::string OAuth2::MakeAuthURL()
|
||||||
{
|
{
|
||||||
|
if ( !m_port )
|
||||||
|
{
|
||||||
|
sockaddr_storage addr = { 0 };
|
||||||
|
addr.ss_family = AF_INET;
|
||||||
|
m_socket = socket( AF_INET, SOCK_STREAM, 0 );
|
||||||
|
if ( m_socket < 0 )
|
||||||
|
throw std::runtime_error( std::string("socket: ") + strerror(errno) );
|
||||||
|
if ( bind( m_socket, (sockaddr*)&addr, sizeof( addr ) ) < 0 )
|
||||||
|
{
|
||||||
|
close( m_socket );
|
||||||
|
m_socket = -1;
|
||||||
|
throw std::runtime_error( std::string("bind: ") + strerror(errno) );
|
||||||
|
}
|
||||||
|
socklen_t len = sizeof( addr );
|
||||||
|
if ( getsockname( m_socket, (sockaddr *)&addr, &len ) == -1 )
|
||||||
|
{
|
||||||
|
close( m_socket );
|
||||||
|
m_socket = -1;
|
||||||
|
throw std::runtime_error( std::string("getsockname: ") + strerror(errno) );
|
||||||
|
}
|
||||||
|
m_port = ntohs(((sockaddr_in*)&addr)->sin_port);
|
||||||
|
if ( listen( m_socket, 128 ) < 0 )
|
||||||
|
{
|
||||||
|
close( m_socket );
|
||||||
|
m_socket = -1;
|
||||||
|
m_port = 0;
|
||||||
|
throw std::runtime_error( std::string("listen: ") + strerror(errno) );
|
||||||
|
}
|
||||||
|
}
|
||||||
return "https://accounts.google.com/o/oauth2/auth"
|
return "https://accounts.google.com/o/oauth2/auth"
|
||||||
"?scope=" +
|
"?scope=" + m_agent->Escape( "https://www.googleapis.com/auth/drive" ) +
|
||||||
m_agent->Escape( "https://www.googleapis.com/auth/userinfo.email" ) + "+" +
|
"&redirect_uri=http%3A%2F%2Flocalhost:" + std::to_string( m_port ) + "%2Fauth" +
|
||||||
m_agent->Escape( "https://www.googleapis.com/auth/userinfo.profile" ) + "+" +
|
|
||||||
m_agent->Escape( "https://docs.google.com/feeds/" ) + "+" +
|
|
||||||
m_agent->Escape( "https://docs.googleusercontent.com/" ) + "+" +
|
|
||||||
m_agent->Escape( "https://spreadsheets.google.com/feeds/" ) +
|
|
||||||
"&redirect_uri=urn:ietf:wg:oauth:2.0:oob"
|
|
||||||
"&response_type=code"
|
"&response_type=code"
|
||||||
"&client_id=" + m_client_id ;
|
"&client_id=" + m_client_id ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool OAuth2::GetCode( )
|
||||||
|
{
|
||||||
|
sockaddr_storage addr = { 0 };
|
||||||
|
int peer_fd = -1;
|
||||||
|
while ( peer_fd < 0 )
|
||||||
|
{
|
||||||
|
socklen_t peer_addr_size = sizeof( addr );
|
||||||
|
peer_fd = accept( m_socket, (sockaddr*)&addr, &peer_addr_size );
|
||||||
|
if ( peer_fd == -1 && errno != EAGAIN && errno != EINTR )
|
||||||
|
throw std::runtime_error( std::string("accept: ") + strerror(errno) );
|
||||||
|
}
|
||||||
|
fcntl( peer_fd, F_SETFL, fcntl( peer_fd, F_GETFL, 0 ) | O_NONBLOCK );
|
||||||
|
struct pollfd pfd = (struct pollfd){
|
||||||
|
.fd = peer_fd,
|
||||||
|
.events = POLLIN|POLLRDHUP,
|
||||||
|
};
|
||||||
|
char buf[4096];
|
||||||
|
std::string request;
|
||||||
|
while ( true )
|
||||||
|
{
|
||||||
|
pfd.revents = 0;
|
||||||
|
poll( &pfd, 1, -1 );
|
||||||
|
if ( pfd.revents & POLLRDHUP )
|
||||||
|
break;
|
||||||
|
int r = 1;
|
||||||
|
while ( r > 0 )
|
||||||
|
{
|
||||||
|
r = read( peer_fd, buf, sizeof( buf ) );
|
||||||
|
if ( r > 0 )
|
||||||
|
request += std::string( buf, r );
|
||||||
|
else if ( r == 0 )
|
||||||
|
break;
|
||||||
|
else if ( errno != EAGAIN && errno != EINTR )
|
||||||
|
throw std::runtime_error( std::string("read: ") + strerror(errno) );
|
||||||
|
}
|
||||||
|
if ( r == 0 || ( r < 0 && request.find( "\n" ) > 0 ) ) // GET ... HTTP/1.1\r\n
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
bool ok = false;
|
||||||
|
if ( request.substr( 0, 10 ) == "GET /auth?" )
|
||||||
|
{
|
||||||
|
std::string line = request;
|
||||||
|
int p = line.find( "\n" );
|
||||||
|
if ( p > 0 )
|
||||||
|
line = line.substr( 0, p );
|
||||||
|
p = line.rfind( " " );
|
||||||
|
if ( p > 0 )
|
||||||
|
line = line.substr( 0, p );
|
||||||
|
p = line.find( "code=" );
|
||||||
|
if ( p > 0 )
|
||||||
|
line = line.substr( p+5 );
|
||||||
|
p = line.find( "&" );
|
||||||
|
if ( p > 0 )
|
||||||
|
line = line.substr( 0, p );
|
||||||
|
ok = Auth( line );
|
||||||
|
}
|
||||||
|
std::string response = ( ok
|
||||||
|
? "Authenticated successfully. Please close the page"
|
||||||
|
: "Authentication error. Please try again" );
|
||||||
|
response = "HTTP/1.1 200 OK\r\n"
|
||||||
|
"Content-Type: text/html; charset=utf-8\r\n"
|
||||||
|
"Connection: close\r\n"
|
||||||
|
"\r\n"+
|
||||||
|
response+
|
||||||
|
"\r\n";
|
||||||
|
write( peer_fd, response.c_str(), response.size() );
|
||||||
|
close( peer_fd );
|
||||||
|
return ok;
|
||||||
|
}
|
||||||
|
|
||||||
void OAuth2::Refresh( )
|
void OAuth2::Refresh( )
|
||||||
{
|
{
|
||||||
std::string post =
|
std::string post =
|
||||||
|
|
|
@ -41,13 +41,15 @@ public :
|
||||||
const std::string& refresh_code,
|
const std::string& refresh_code,
|
||||||
const std::string& client_id,
|
const std::string& client_id,
|
||||||
const std::string& client_secret ) ;
|
const std::string& client_secret ) ;
|
||||||
|
~OAuth2( ) ;
|
||||||
|
|
||||||
std::string Str() const ;
|
std::string Str() const ;
|
||||||
|
|
||||||
std::string MakeAuthURL() ;
|
std::string MakeAuthURL() ;
|
||||||
|
|
||||||
void Auth( const std::string& auth_code ) ;
|
bool Auth( const std::string& auth_code ) ;
|
||||||
void Refresh( ) ;
|
void Refresh( ) ;
|
||||||
|
bool GetCode( ) ;
|
||||||
|
|
||||||
std::string RefreshToken( ) const ;
|
std::string RefreshToken( ) const ;
|
||||||
std::string AccessToken( ) const ;
|
std::string AccessToken( ) const ;
|
||||||
|
@ -59,6 +61,8 @@ private :
|
||||||
std::string m_access ;
|
std::string m_access ;
|
||||||
std::string m_refresh ;
|
std::string m_refresh ;
|
||||||
http::Agent* m_agent ;
|
http::Agent* m_agent ;
|
||||||
|
int m_port ;
|
||||||
|
int m_socket ;
|
||||||
|
|
||||||
const std::string m_client_id ;
|
const std::string m_client_id ;
|
||||||
const std::string m_client_secret ;
|
const std::string m_client_secret ;
|
||||||
|
|
|
@ -23,7 +23,7 @@
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
ConcatStream::ConcatStream() :
|
ConcatStream::ConcatStream() :
|
||||||
m_cur( 0 ), m_size( 0 ), m_pos( 0 )
|
m_size( 0 ), m_pos( 0 ), m_cur( 0 )
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -63,13 +63,13 @@ off_t ConcatStream::Seek( off_t offset, int whence )
|
||||||
offset += m_pos;
|
offset += m_pos;
|
||||||
else if ( whence == 2 )
|
else if ( whence == 2 )
|
||||||
offset += Size();
|
offset += Size();
|
||||||
if ( offset > Size() )
|
if ( (u64_t)offset > Size() )
|
||||||
offset = Size();
|
offset = Size();
|
||||||
m_cur = 0;
|
m_cur = 0;
|
||||||
m_pos = offset;
|
m_pos = offset;
|
||||||
if ( m_streams.size() )
|
if ( m_streams.size() )
|
||||||
{
|
{
|
||||||
while ( offset > m_sizes[m_cur] )
|
while ( (u64_t)offset > m_sizes[m_cur] )
|
||||||
m_cur++;
|
m_cur++;
|
||||||
m_streams[m_cur]->Seek( offset - ( m_cur > 0 ? m_sizes[m_cur-1] : 0 ), 0 );
|
m_streams[m_cur]->Seek( offset - ( m_cur > 0 ? m_sizes[m_cur-1] : 0 ), 0 );
|
||||||
}
|
}
|
||||||
|
@ -90,7 +90,7 @@ void ConcatStream::Append( SeekStream *stream )
|
||||||
{
|
{
|
||||||
if ( stream )
|
if ( stream )
|
||||||
{
|
{
|
||||||
off_t size = stream->Size();
|
u64_t size = stream->Size();
|
||||||
if ( size > 0 )
|
if ( size > 0 )
|
||||||
{
|
{
|
||||||
// "fix" stream size at the moment of adding so further changes of underlying files
|
// "fix" stream size at the moment of adding so further changes of underlying files
|
||||||
|
|
|
@ -41,9 +41,9 @@ public :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
std::vector<SeekStream*> m_streams ;
|
std::vector<SeekStream*> m_streams ;
|
||||||
std::vector<off_t> m_sizes ;
|
std::vector<u64_t> m_sizes ;
|
||||||
off_t m_size, m_pos ;
|
u64_t m_size, m_pos ;
|
||||||
int m_cur ;
|
size_t m_cur ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} // end of namespace
|
} // end of namespace
|
||||||
|
|
|
@ -38,6 +38,10 @@ const std::string default_root_folder = ".";
|
||||||
|
|
||||||
Config::Config( const po::variables_map& vm )
|
Config::Config( const po::variables_map& vm )
|
||||||
{
|
{
|
||||||
|
if ( vm.count( "id" ) > 0 )
|
||||||
|
m_cmd.Add( "id", Val( vm["id"].as<std::string>() ) ) ;
|
||||||
|
if ( vm.count( "secret" ) > 0 )
|
||||||
|
m_cmd.Add( "secret", Val( vm["secret"].as<std::string>() ) ) ;
|
||||||
m_cmd.Add( "new-rev", Val(vm.count("new-rev") > 0) ) ;
|
m_cmd.Add( "new-rev", Val(vm.count("new-rev") > 0) ) ;
|
||||||
m_cmd.Add( "force", Val(vm.count("force") > 0 ) ) ;
|
m_cmd.Add( "force", Val(vm.count("force") > 0 ) ) ;
|
||||||
m_cmd.Add( "path", Val(vm.count("path") > 0
|
m_cmd.Add( "path", Val(vm.count("path") > 0
|
||||||
|
@ -48,6 +52,9 @@ Config::Config( const po::variables_map& vm )
|
||||||
: "" ) ) ;
|
: "" ) ) ;
|
||||||
if ( vm.count( "ignore" ) > 0 )
|
if ( vm.count( "ignore" ) > 0 )
|
||||||
m_cmd.Add( "ignore", Val( vm["ignore"].as<std::string>() ) );
|
m_cmd.Add( "ignore", Val( vm["ignore"].as<std::string>() ) );
|
||||||
|
m_cmd.Add( "no-remote-new", Val( vm.count( "no-remote-new" ) > 0 || vm.count( "upload-only" ) > 0 ) );
|
||||||
|
m_cmd.Add( "upload-only", Val( vm.count( "upload-only" ) > 0 ) );
|
||||||
|
m_cmd.Add( "no-delete-remote", Val( vm.count( "no-delete-remote" ) > 0 ) );
|
||||||
|
|
||||||
m_path = GetPath( fs::path(m_cmd["path"].Str()) ) ;
|
m_path = GetPath( fs::path(m_cmd["path"].Str()) ) ;
|
||||||
m_file = Read( ) ;
|
m_file = Read( ) ;
|
||||||
|
@ -77,7 +84,7 @@ void Config::Save( )
|
||||||
|
|
||||||
void Config::Set( const std::string& key, const Val& value )
|
void Config::Set( const std::string& key, const Val& value )
|
||||||
{
|
{
|
||||||
m_file.Add( key, value ) ;
|
m_file.Set( key, value ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
Val Config::Get( const std::string& key ) const
|
Val Config::Get( const std::string& key ) const
|
||||||
|
|
|
@ -24,7 +24,6 @@
|
||||||
#include "MemMap.hh"
|
#include "MemMap.hh"
|
||||||
|
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
#include <sstream>
|
|
||||||
|
|
||||||
// dependent libraries
|
// dependent libraries
|
||||||
#include <gcrypt.h>
|
#include <gcrypt.h>
|
||||||
|
|
|
@ -50,7 +50,7 @@ public :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
struct Impl ;
|
struct Impl ;
|
||||||
std::auto_ptr<Impl> m_impl ;
|
std::unique_ptr<Impl> m_impl ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} } // end of namespace gr
|
} } // end of namespace gr
|
||||||
|
|
|
@ -33,7 +33,6 @@
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <iostream>
|
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
|
|
||||||
#include <time.h>
|
#include <time.h>
|
||||||
|
|
|
@ -26,7 +26,6 @@
|
||||||
|
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
#include <iterator>
|
#include <iterator>
|
||||||
#include <sstream>
|
|
||||||
|
|
||||||
namespace gr {
|
namespace gr {
|
||||||
|
|
||||||
|
|
|
@ -33,6 +33,10 @@
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
|
|
||||||
|
#if defined(__FreeBSD__) || defined(__OpenBSD__)
|
||||||
|
#include <unistd.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifdef WIN32
|
#ifdef WIN32
|
||||||
#include <io.h>
|
#include <io.h>
|
||||||
typedef int ssize_t ;
|
typedef int ssize_t ;
|
||||||
|
|
|
@ -178,7 +178,7 @@ public :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
typedef impl::FuncImpl<Type> Impl ;
|
typedef impl::FuncImpl<Type> Impl ;
|
||||||
std::auto_ptr<Impl> m_pimpl ;
|
std::unique_ptr<Impl> m_pimpl ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} // end of namespace
|
} // end of namespace
|
||||||
|
|
|
@ -39,12 +39,12 @@
|
||||||
|
|
||||||
namespace gr { namespace os {
|
namespace gr { namespace os {
|
||||||
|
|
||||||
DateTime FileCTime( const fs::path& filename )
|
void Stat( const fs::path& filename, DateTime *t, off_t *size, FileType *ft )
|
||||||
{
|
{
|
||||||
return FileCTime( filename.string() ) ;
|
Stat( filename.string(), t, size, ft ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
DateTime FileCTime( const std::string& filename )
|
void Stat( const std::string& filename, DateTime *t, off64_t *size, FileType *ft )
|
||||||
{
|
{
|
||||||
struct stat s = {} ;
|
struct stat s = {} ;
|
||||||
if ( ::stat( filename.c_str(), &s ) != 0 )
|
if ( ::stat( filename.c_str(), &s ) != 0 )
|
||||||
|
@ -57,12 +57,19 @@ DateTime FileCTime( const std::string& filename )
|
||||||
) ;
|
) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if defined __APPLE__ && defined __DARWIN_64_BIT_INO_T
|
if ( t )
|
||||||
return DateTime( s.st_ctimespec.tv_sec, s.st_ctimespec.tv_nsec ) ;
|
{
|
||||||
|
#if defined __NetBSD__ || ( defined __APPLE__ && defined __DARWIN_64_BIT_INO_T )
|
||||||
|
*t = DateTime( s.st_ctimespec.tv_sec, s.st_ctimespec.tv_nsec ) ;
|
||||||
#else
|
#else
|
||||||
return DateTime( s.st_ctim.tv_sec, s.st_ctim.tv_nsec);
|
*t = DateTime( s.st_ctim.tv_sec, s.st_ctim.tv_nsec);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
if ( size )
|
||||||
|
*size = s.st_size;
|
||||||
|
if ( ft )
|
||||||
|
*ft = S_ISDIR( s.st_mode ) ? FT_DIR : ( S_ISREG( s.st_mode ) ? FT_FILE : FT_UNKNOWN ) ;
|
||||||
|
}
|
||||||
|
|
||||||
void SetFileTime( const fs::path& filename, const DateTime& t )
|
void SetFileTime( const fs::path& filename, const DateTime& t )
|
||||||
{
|
{
|
||||||
|
|
|
@ -29,12 +29,18 @@ namespace gr {
|
||||||
class DateTime ;
|
class DateTime ;
|
||||||
class Path ;
|
class Path ;
|
||||||
|
|
||||||
|
enum FileType { FT_FILE = 1, FT_DIR = 2, FT_UNKNOWN = 3 } ;
|
||||||
|
|
||||||
|
#ifndef off64_t
|
||||||
|
#define off64_t off_t
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace os
|
namespace os
|
||||||
{
|
{
|
||||||
struct Error : virtual Exception {} ;
|
struct Error : virtual Exception {} ;
|
||||||
|
|
||||||
DateTime FileCTime( const std::string& filename ) ;
|
void Stat( const std::string& filename, DateTime *t, off64_t *size, FileType *ft ) ;
|
||||||
DateTime FileCTime( const fs::path& filename ) ;
|
void Stat( const fs::path& filename, DateTime *t, off64_t *size, FileType *ft ) ;
|
||||||
|
|
||||||
void SetFileTime( const std::string& filename, const DateTime& t ) ;
|
void SetFileTime( const std::string& filename, const DateTime& t ) ;
|
||||||
void SetFileTime( const fs::path& filename, const DateTime& t ) ;
|
void SetFileTime( const fs::path& filename, const DateTime& t ) ;
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "util/Types.hh"
|
||||||
|
|
||||||
|
namespace gr {
|
||||||
|
|
||||||
|
class Progress
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
virtual void reportProgress(u64_t total, u64_t processed) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
||||||
|
;
|
|
@ -0,0 +1,87 @@
|
||||||
|
#include <sys/ioctl.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
|
||||||
|
#include "ProgressBar.hh"
|
||||||
|
|
||||||
|
namespace gr
|
||||||
|
{
|
||||||
|
|
||||||
|
ProgressBar::ProgressBar(): showProgressBar(false), last(1000)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
ProgressBar::~ProgressBar()
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void ProgressBar::setShowProgressBar(bool showProgressBar)
|
||||||
|
{
|
||||||
|
this->showProgressBar = showProgressBar;
|
||||||
|
}
|
||||||
|
|
||||||
|
unsigned short int ProgressBar::determineTerminalSize()
|
||||||
|
{
|
||||||
|
struct winsize w;
|
||||||
|
ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
|
||||||
|
return w.ws_col;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ProgressBar::printBytes(u64_t bytes)
|
||||||
|
{
|
||||||
|
if (bytes >= 1024*1024*1024)
|
||||||
|
printf("%.3f GB", (double)bytes/1024/1024/1024);
|
||||||
|
else if (bytes >= 1024*1024)
|
||||||
|
printf("%.3f MB", (double)bytes/1024/1024);
|
||||||
|
else
|
||||||
|
printf("%.3f KB", (double)bytes/1024);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ProgressBar::reportProgress(u64_t total, u64_t processed)
|
||||||
|
{
|
||||||
|
if (showProgressBar && total)
|
||||||
|
{
|
||||||
|
// libcurl seems to process more bytes then the actual file size :)
|
||||||
|
if (processed > total)
|
||||||
|
processed = total;
|
||||||
|
double fraction = (double)processed/total;
|
||||||
|
|
||||||
|
int point = fraction*1000;
|
||||||
|
if (this->last < 1000 || point != this->last)
|
||||||
|
{
|
||||||
|
// do not print 100% progress multiple times (it will duplicate the progressbar)
|
||||||
|
this->last = point;
|
||||||
|
|
||||||
|
// 10 for prefix of percent and 26 for suffix of file size
|
||||||
|
int availableSize = determineTerminalSize() - 36;
|
||||||
|
int totalDots;
|
||||||
|
if (availableSize > 100)
|
||||||
|
totalDots = 100;
|
||||||
|
else if (availableSize < 0)
|
||||||
|
totalDots = 10;
|
||||||
|
else
|
||||||
|
totalDots = availableSize;
|
||||||
|
|
||||||
|
int dotz = round(fraction * totalDots);
|
||||||
|
int count = 0;
|
||||||
|
// delete previous output line
|
||||||
|
printf("\r [%3.0f%%] [", fraction * 100);
|
||||||
|
for (; count < dotz - 1; count++)
|
||||||
|
putchar('=');
|
||||||
|
putchar('>');
|
||||||
|
for (; count < totalDots - 1; count++)
|
||||||
|
putchar(' ');
|
||||||
|
printf("] ");
|
||||||
|
printBytes(processed);
|
||||||
|
putchar('/');
|
||||||
|
printBytes(total);
|
||||||
|
printf("\33[K\r");
|
||||||
|
if (point == 1000)
|
||||||
|
putchar('\n');
|
||||||
|
fflush(stdout);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,25 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "util/Progress.hh"
|
||||||
|
|
||||||
|
namespace gr {
|
||||||
|
|
||||||
|
class ProgressBar: public Progress
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
ProgressBar();
|
||||||
|
virtual ~ProgressBar();
|
||||||
|
|
||||||
|
void reportProgress(u64_t total, u64_t processed);
|
||||||
|
void setShowProgressBar(bool showProgressBar);
|
||||||
|
|
||||||
|
private:
|
||||||
|
static void printBytes(u64_t bytes);
|
||||||
|
static unsigned short int determineTerminalSize();
|
||||||
|
|
||||||
|
bool showProgressBar;
|
||||||
|
int last;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
||||||
|
;
|
|
@ -51,7 +51,7 @@ off_t StringStream::Seek( off_t offset, int whence )
|
||||||
offset += m_pos;
|
offset += m_pos;
|
||||||
else if ( whence == 2 )
|
else if ( whence == 2 )
|
||||||
offset += Size();
|
offset += Size();
|
||||||
if ( offset > Size() )
|
if ( (u64_t)offset > Size() )
|
||||||
offset = Size();
|
offset = Size();
|
||||||
m_pos = (size_t)offset;
|
m_pos = (size_t)offset;
|
||||||
return m_pos;
|
return m_pos;
|
||||||
|
|
|
@ -39,7 +39,7 @@ CompositeLog::~CompositeLog()
|
||||||
std::for_each( m_logs.begin(), m_logs.end(), Destroy() ) ;
|
std::for_each( m_logs.begin(), m_logs.end(), Destroy() ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
LogBase* CompositeLog::Add( std::auto_ptr<LogBase> log )
|
LogBase* CompositeLog::Add( std::unique_ptr<LogBase>& log )
|
||||||
{
|
{
|
||||||
m_logs.push_back( log.get() ) ;
|
m_logs.push_back( log.get() ) ;
|
||||||
return log.release() ;
|
return log.release() ;
|
||||||
|
|
|
@ -32,7 +32,7 @@ public :
|
||||||
CompositeLog() ;
|
CompositeLog() ;
|
||||||
~CompositeLog() ;
|
~CompositeLog() ;
|
||||||
|
|
||||||
LogBase* Add( std::auto_ptr<LogBase> log ) ;
|
LogBase* Add( std::unique_ptr<LogBase>& log ) ;
|
||||||
|
|
||||||
void Log( const log::Fmt& msg, log::Serverity s ) ;
|
void Log( const log::Fmt& msg, log::Serverity s ) ;
|
||||||
|
|
||||||
|
|
|
@ -40,12 +40,12 @@ public :
|
||||||
}
|
}
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
LogBase* LogBase::Inst( std::auto_ptr<LogBase> log )
|
LogBase* LogBase::Inst( LogBase *log )
|
||||||
{
|
{
|
||||||
static std::auto_ptr<LogBase> inst( new MockLog ) ;
|
static std::unique_ptr<LogBase> inst( new MockLog ) ;
|
||||||
|
|
||||||
if ( log.get() != 0 )
|
if ( log != 0 )
|
||||||
inst = log ;
|
inst.reset( log ) ;
|
||||||
|
|
||||||
assert( inst.get() != 0 ) ;
|
assert( inst.get() != 0 ) ;
|
||||||
return inst.get() ;
|
return inst.get() ;
|
||||||
|
|
|
@ -65,7 +65,7 @@ public :
|
||||||
virtual bool Enable( log::Serverity s, bool enable = true ) = 0 ;
|
virtual bool Enable( log::Serverity s, bool enable = true ) = 0 ;
|
||||||
virtual bool IsEnabled( log::Serverity s ) const = 0 ;
|
virtual bool IsEnabled( log::Serverity s ) const = 0 ;
|
||||||
|
|
||||||
static LogBase* Inst( std::auto_ptr<LogBase> log = std::auto_ptr<LogBase>() ) ;
|
static LogBase* Inst( LogBase *log = 0 ) ;
|
||||||
virtual ~LogBase() ;
|
virtual ~LogBase() ;
|
||||||
|
|
||||||
protected :
|
protected :
|
||||||
|
@ -115,6 +115,12 @@ void Log(
|
||||||
LogBase::Inst()->Log( log::Fmt(fmt) % p1 % p2 % p3 % p4, s ) ;
|
LogBase::Inst()->Log( log::Fmt(fmt) % p1 % p2 % p3 % p4, s ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename P1, typename P2, typename P3, typename P4, typename P5>
|
||||||
|
void Log( const std::string& fmt, const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, log::Serverity s = log::info )
|
||||||
|
{
|
||||||
|
LogBase::Inst()->Log( log::Fmt(fmt) % p1 % p2 % p3 % p4 % p5, s ) ;
|
||||||
|
}
|
||||||
|
|
||||||
void Trace( const std::string& str ) ;
|
void Trace( const std::string& str ) ;
|
||||||
|
|
||||||
template <typename P1>
|
template <typename P1>
|
||||||
|
|
|
@ -23,7 +23,6 @@
|
||||||
#include "Node.hh"
|
#include "Node.hh"
|
||||||
#include "util/log/Log.hh"
|
#include "util/log/Log.hh"
|
||||||
|
|
||||||
#include <expat.h>
|
|
||||||
|
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
|
@ -55,7 +55,7 @@ private :
|
||||||
|
|
||||||
private :
|
private :
|
||||||
struct Impl ;
|
struct Impl ;
|
||||||
std::auto_ptr<Impl> m_impl ;
|
std::unique_ptr<Impl> m_impl ;
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
} } // end of namespace
|
} } // end of namespace
|
||||||
|
|
|
@ -21,7 +21,6 @@
|
||||||
|
|
||||||
#include "util/log/DefaultLog.hh"
|
#include "util/log/DefaultLog.hh"
|
||||||
|
|
||||||
#include "drive/EntryTest.hh"
|
|
||||||
#include "base/ResourceTest.hh"
|
#include "base/ResourceTest.hh"
|
||||||
#include "base/ResourceTreeTest.hh"
|
#include "base/ResourceTreeTest.hh"
|
||||||
#include "base/StateTest.hh"
|
#include "base/StateTest.hh"
|
||||||
|
@ -29,16 +28,15 @@
|
||||||
#include "util/FunctionTest.hh"
|
#include "util/FunctionTest.hh"
|
||||||
#include "util/ConfigTest.hh"
|
#include "util/ConfigTest.hh"
|
||||||
#include "util/SignalHandlerTest.hh"
|
#include "util/SignalHandlerTest.hh"
|
||||||
#include "xml/NodeTest.hh"
|
//#include "xml/NodeTest.hh"
|
||||||
|
|
||||||
int main( int argc, char **argv )
|
int main( int argc, char **argv )
|
||||||
{
|
{
|
||||||
using namespace grut ;
|
using namespace grut ;
|
||||||
|
|
||||||
gr::LogBase::Inst( std::auto_ptr<gr::LogBase>(new gr::log::DefaultLog) ) ;
|
gr::LogBase::Inst( new gr::log::DefaultLog ) ;
|
||||||
|
|
||||||
CppUnit::TextUi::TestRunner runner;
|
CppUnit::TextUi::TestRunner runner;
|
||||||
runner.addTest( Entry1Test::suite( ) ) ;
|
|
||||||
runner.addTest( StateTest::suite( ) ) ;
|
runner.addTest( StateTest::suite( ) ) ;
|
||||||
runner.addTest( ResourceTest::suite( ) ) ;
|
runner.addTest( ResourceTest::suite( ) ) ;
|
||||||
runner.addTest( ResourceTreeTest::suite( ) ) ;
|
runner.addTest( ResourceTreeTest::suite( ) ) ;
|
||||||
|
@ -46,7 +44,7 @@ int main( int argc, char **argv )
|
||||||
runner.addTest( FunctionTest::suite( ) ) ;
|
runner.addTest( FunctionTest::suite( ) ) ;
|
||||||
runner.addTest( ConfigTest::suite( ) ) ;
|
runner.addTest( ConfigTest::suite( ) ) ;
|
||||||
runner.addTest( SignalHandlerTest::suite( ) ) ;
|
runner.addTest( SignalHandlerTest::suite( ) ) ;
|
||||||
runner.addTest( NodeTest::suite( ) ) ;
|
//runner.addTest( NodeTest::suite( ) ) ;
|
||||||
runner.run();
|
runner.run();
|
||||||
|
|
||||||
return 0 ;
|
return 0 ;
|
||||||
|
|
|
@ -23,15 +23,15 @@
|
||||||
|
|
||||||
#include "base/Resource.hh"
|
#include "base/Resource.hh"
|
||||||
|
|
||||||
#include "drive/Entry1.hh"
|
#include "drive2/Entry2.hh"
|
||||||
#include "xml/Node.hh"
|
#include "json/Val.hh"
|
||||||
|
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
namespace grut {
|
namespace grut {
|
||||||
|
|
||||||
using namespace gr ;
|
using namespace gr ;
|
||||||
using namespace gr::v1 ;
|
using namespace gr::v2 ;
|
||||||
|
|
||||||
ResourceTest::ResourceTest( )
|
ResourceTest::ResourceTest( )
|
||||||
{
|
{
|
||||||
|
@ -51,19 +51,23 @@ void ResourceTest::TestNormal( )
|
||||||
Resource subject( "entry.xml", "file" ) ;
|
Resource subject( "entry.xml", "file" ) ;
|
||||||
root.AddChild( &subject ) ;
|
root.AddChild( &subject ) ;
|
||||||
|
|
||||||
|
GRUT_ASSERT_EQUAL( subject.IsRoot(), false ) ;
|
||||||
GRUT_ASSERT_EQUAL( subject.Path(), fs::path( TEST_DATA ) / "entry.xml" ) ;
|
GRUT_ASSERT_EQUAL( subject.Path(), fs::path( TEST_DATA ) / "entry.xml" ) ;
|
||||||
|
|
||||||
subject.FromLocal( DateTime() ) ;
|
Val st;
|
||||||
|
st.Add( "srv_time", Val( DateTime( "2012-05-09T16:13:22.401Z" ).Sec() ) );
|
||||||
|
subject.FromLocal( st ) ;
|
||||||
GRUT_ASSERT_EQUAL( subject.MD5(), "c0742c0a32b2c909b6f176d17a6992d0" ) ;
|
GRUT_ASSERT_EQUAL( subject.MD5(), "c0742c0a32b2c909b6f176d17a6992d0" ) ;
|
||||||
GRUT_ASSERT_EQUAL( subject.StateStr(), "local_new" ) ;
|
GRUT_ASSERT_EQUAL( subject.StateStr(), "local_new" ) ;
|
||||||
|
|
||||||
xml::Node entry = xml::Node::Element( "entry" ) ;
|
Val entry;
|
||||||
entry.AddElement( "updated" ).AddText( "2012-05-09T16:13:22.401Z" ) ;
|
entry.Set( "modifiedDate", Val( std::string( "2012-05-09T16:13:22.401Z" ) ) );
|
||||||
|
entry.Set( "md5Checksum", Val( std::string( "DIFFERENT" ) ) );
|
||||||
|
|
||||||
Entry1 remote( entry ) ;
|
Entry2 remote( entry ) ;
|
||||||
subject.FromRemote( remote, DateTime() ) ;
|
GRUT_ASSERT_EQUAL( "different", remote.MD5() ) ;
|
||||||
|
subject.FromRemote( remote ) ;
|
||||||
GRUT_ASSERT_EQUAL( "local_changed", subject.StateStr() ) ;
|
GRUT_ASSERT_EQUAL( "local_changed", subject.StateStr() ) ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
} // end of namespace grut
|
} // end of namespace grut
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
|
|
||||||
#include "json/Val.hh"
|
#include "json/Val.hh"
|
||||||
#include <boost/test/unit_test.hpp>
|
#include <boost/test/unit_test.hpp>
|
||||||
|
#include <iostream>
|
||||||
|
|
||||||
using namespace gr ;
|
using namespace gr ;
|
||||||
|
|
||||||
|
@ -33,11 +34,11 @@ BOOST_FIXTURE_TEST_SUITE( ValTest, Fixture )
|
||||||
|
|
||||||
BOOST_AUTO_TEST_CASE( TestSimpleTypes )
|
BOOST_AUTO_TEST_CASE( TestSimpleTypes )
|
||||||
{
|
{
|
||||||
Val null ;
|
BOOST_CHECK_EQUAL( Val::Null().Type(), Val::null_type ) ;
|
||||||
BOOST_CHECK_EQUAL( null.Type(), Val::null_type ) ;
|
BOOST_CHECK( Val::Null().Is<void>() ) ;
|
||||||
BOOST_CHECK( null.Is<void>() ) ;
|
|
||||||
|
|
||||||
Val i( 100 ) ;
|
Val i( 100 ) ;
|
||||||
|
BOOST_CHECK_EQUAL( i.Str(), "100" );
|
||||||
BOOST_CHECK_EQUAL( i.As<long long>(), 100 ) ;
|
BOOST_CHECK_EQUAL( i.As<long long>(), 100 ) ;
|
||||||
BOOST_CHECK_EQUAL( i.Type(), Val::int_type ) ;
|
BOOST_CHECK_EQUAL( i.Type(), Val::int_type ) ;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
{ "change_stamp": "", "rtree": { "name": ".", "id": "folder:root", "href": "https:\/\/docs.google.com\/feeds\/default\/private\/full\/folder%3Aroot", "md5": "", "kind": "folder", "mtime": { "sec": 0, "nsec": 0 }, "child": [ { "name": "entry.xml", "id": "", "href": "", "md5": "c0742c0a32b2c909b6f176d17a6992d0", "kind": "file", "mtime": { "sec": 1336796872, "nsec": 404985662 }, "child": [ ] } ] } }
|
|
|
@ -1,59 +0,0 @@
|
||||||
/*
|
|
||||||
grive: an GPL program to sync a local directory with Google Drive
|
|
||||||
Copyright (C) 2012 Wan Wai Ho
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "EntryTest.hh"
|
|
||||||
|
|
||||||
#include "Assert.hh"
|
|
||||||
|
|
||||||
#include "drive/Entry1.hh"
|
|
||||||
#include "xml/Node.hh"
|
|
||||||
#include "xml/NodeSet.hh"
|
|
||||||
#include "xml/TreeBuilder.hh"
|
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
namespace grut {
|
|
||||||
|
|
||||||
using namespace gr ;
|
|
||||||
using namespace gr::v1 ;
|
|
||||||
|
|
||||||
Entry1Test::Entry1Test( )
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
void Entry1Test::TestXml( )
|
|
||||||
{
|
|
||||||
xml::Node root = xml::TreeBuilder::ParseFile( TEST_DATA "entry.xml" ) ;
|
|
||||||
|
|
||||||
CPPUNIT_ASSERT( !root["entry"].empty() ) ;
|
|
||||||
|
|
||||||
Entry1 subject( root["entry"].front() ) ;
|
|
||||||
GRUT_ASSERT_EQUAL( "snes", subject.Title() ) ;
|
|
||||||
GRUT_ASSERT_EQUAL( "\"WxYPGE8CDyt7ImBk\"", subject.ETag() ) ;
|
|
||||||
GRUT_ASSERT_EQUAL( "https://docs.google.com/feeds/default/private/full/folder%3A0B5KhdsbryVeGMl83OEV1ZVc3cUE",
|
|
||||||
subject.SelfHref() ) ;
|
|
||||||
|
|
||||||
GRUT_ASSERT_EQUAL( 1U, subject.ParentHrefs().size() ) ;
|
|
||||||
GRUT_ASSERT_EQUAL( "https://docs.google.com/feeds/default/private/full/folder%3A0B5KhdsbryVeGNEZjdUxzZHl3Sjg",
|
|
||||||
subject.ParentHrefs().front() ) ;
|
|
||||||
|
|
||||||
GRUT_ASSERT_EQUAL( true, subject.IsDir() ) ;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // end of namespace grut
|
|
|
@ -1,41 +0,0 @@
|
||||||
/*
|
|
||||||
grive: an GPL program to sync a local directory with Google Drive
|
|
||||||
Copyright (C) 2012 Wan Wai Ho
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or
|
|
||||||
modify it under the terms of the GNU General Public License
|
|
||||||
as published by the Free Software Foundation version 2
|
|
||||||
of the License.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <cppunit/TestFixture.h>
|
|
||||||
#include <cppunit/extensions/HelperMacros.h>
|
|
||||||
|
|
||||||
namespace grut {
|
|
||||||
|
|
||||||
class Entry1Test : public CppUnit::TestFixture
|
|
||||||
{
|
|
||||||
public :
|
|
||||||
Entry1Test( ) ;
|
|
||||||
|
|
||||||
// declare suit function
|
|
||||||
CPPUNIT_TEST_SUITE( Entry1Test ) ;
|
|
||||||
CPPUNIT_TEST( TestXml ) ;
|
|
||||||
CPPUNIT_TEST_SUITE_END();
|
|
||||||
|
|
||||||
private :
|
|
||||||
void TestXml( ) ;
|
|
||||||
} ;
|
|
||||||
|
|
||||||
} // end of namespace
|
|
|
@ -0,0 +1,27 @@
|
||||||
|
# Absolute path of the installed sync helper; substituted into the
# unit templates below as @GRIVE_SYNC_SH_BINARY@.
SET(GRIVE_SYNC_SH_BINARY "${CMAKE_INSTALL_FULL_LIBEXECDIR}/grive/grive-sync.sh")

# Expand @GRIVE_SYNC_SH_BINARY@ in the service templates.
CONFIGURE_FILE(grive-changes@.service.in grive-changes@.service @ONLY)
CONFIGURE_FILE(grive-timer@.service.in grive-timer@.service @ONLY)

# All user units — the static ones and the two generated above —
# share one destination, so install them in a single call.
install(
	FILES
		grive@.service
		${CMAKE_BINARY_DIR}/systemd/grive-changes@.service
		${CMAKE_BINARY_DIR}/systemd/grive-timer@.service
		grive-timer@.timer
	DESTINATION
		lib/systemd/user
)

# PROGRAMS (not FILES) so the helper keeps its execute permission.
install(
	PROGRAMS
		grive-sync.sh
	DESTINATION
		${CMAKE_INSTALL_FULL_LIBEXECDIR}/grive
)
|
|
@ -0,0 +1,11 @@
|
||||||
|
# Watches one Google Drive checkout for local changes and syncs them.
# %i is the systemd-escaped directory name, relative to the user's home.
[Unit]
Description=Google drive sync (changed files)

[Service]
Type=simple
ExecStart=@GRIVE_SYNC_SH_BINARY@ listen "%i"
# The listener must survive transient failures (lost network, etc.).
Restart=always
RestartSec=30

[Install]
WantedBy=default.target
|
|
@ -0,0 +1,122 @@
|
||||||
|
#!/bin/bash

# Copyright (C) 2009 Przemyslaw Pawelczyk <przemoc@gmail.com>
# (C) 2017 Jan Schulz <jasc@gmx.net>
##
## This script is licensed under the terms of the MIT license.
## https://opensource.org/licenses/MIT

# Fail a pipeline when any stage of it fails.
# NOTE: deliberately no `set -e`; the lock handling below relies on
# continuing after non-zero statuses.
set -o pipefail

# We always start in the current user's home directory so that
# directory names are always interpreted relative to it.
cd ~ || exit 1

### ARGUMENT PARSING ###
SCRIPT="${0}"
# systemd passes %i escaped; turn it back into a real path.
DIRECTORY=$(systemd-escape --unescape -- "$2")

if [[ -z "$DIRECTORY" ]] || [[ ! -d "$DIRECTORY" ]] ; then
	echo "Need a directory name in the current users home directory as second argument. Aborting." >&2
	exit 1
fi

# First argument selects the mode; anything else is an error.
case "${1}" in
	"")
		echo "Need a command as first argument. Aborting." >&2
		exit 1
		;;
	sync)
		COMMAND=sync
		;;
	listen)
		COMMAND=listen
		;;
	*)
		echo "Unknown command. Aborting." >&2
		exit 1
		;;
esac
|
||||||
|
|
||||||
|
|
||||||
|
### LOCKFILE BOILERPLATE ###
# One lock per synced directory; slashes in the directory name are
# flattened to underscores so the lock file lives directly in the
# user's runtime directory.
LOCKFILE="/run/user/$(id -u)/$(basename "$0")_${DIRECTORY//\//_}"
LOCKFD=99

# PRIVATE
_lock() { flock -"$1" "$LOCKFD"; }
_no_more_locking() { _lock u; _lock xn && rm -f "$LOCKFILE"; }
# Open the lock fd once and arrange for cleanup on exit.  eval is
# required because the fd number lives in a variable; $LOCKFILE is
# escaped so it is expanded by the eval'd command itself, not spliced
# into the code (which would break on paths containing quotes).
_prepare_locking() { eval "exec ${LOCKFD}>\"\${LOCKFILE}\""; trap _no_more_locking EXIT; }

# ON START
_prepare_locking

# PUBLIC
exlock_now() { _lock xn; } # obtain an exclusive lock immediately or fail
exlock()     { _lock x; }  # obtain an exclusive lock
shlock()     { _lock s; }  # obtain a shared lock
unlock()     { _lock u; }  # drop a lock
|
||||||
|
|
||||||
|
### SYNC SCRIPT ###
|
||||||
|
# Idea: only let one script run, but if the sync script is called a second time
|
||||||
|
# make sure we sync a second time, too
|
||||||
|
|
||||||
|
sync_directory() {
|
||||||
|
_directory="${1}"
|
||||||
|
|
||||||
|
reset_timer_and_exit() { echo "Retriggered google drive sync ('${_directory}')" && touch -m $LOCKFILE && exit; }
|
||||||
|
|
||||||
|
exlock_now || reset_timer_and_exit
|
||||||
|
|
||||||
|
if ping -c1 -W1 -q accounts.google.com >/dev/null 2>&1; then
|
||||||
|
true
|
||||||
|
# pass
|
||||||
|
else
|
||||||
|
echo "Google drive server not reachable, NOT syncing..."
|
||||||
|
unlock
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
TIME_AT_START=0
|
||||||
|
TIME_AT_END=1
|
||||||
|
while [[ "${TIME_AT_START}" -lt "${TIME_AT_END}" ]]; do
|
||||||
|
echo "Syncing '${_directory}'..."
|
||||||
|
TIME_AT_START="$(stat -c %Y "$LOCKFILE")"
|
||||||
|
grive -p "${_directory}" 2>&1 | grep -v -E "^Reading local directories$|^Reading remote server file list$|^Synchronizing files$|^Finished!$"
|
||||||
|
TIME_AT_END="$(stat -c %Y "$LOCKFILE")"
|
||||||
|
echo "Sync of '${_directory}' done."
|
||||||
|
done
|
||||||
|
|
||||||
|
# always exit ok, so that we never go into a wrong systemd state
|
||||||
|
unlock
|
||||||
|
exit 0
|
||||||
|
}
|
||||||
|
|
||||||
|
### LISTEN TO CHANGES IN DIRECTORY ###

# Watch one directory tree and trigger a sync on every change.
# $1 - directory to watch (relative to the user's home directory).
# Runs indefinitely; requires inotifywait (inotify-tools).
listen_directory() {
	_directory="${1}"

	command -v inotifywait >/dev/null 2>&1 || { echo >&2 "I require inotifywait but it's not installed. Aborting."; exit 1; }

	echo "Listening for changes in '${_directory}'"

	while true #run indefinitely
	do
		# Use a different call to not need to change exit into return;
		# quote both the script path and the escaped directory so
		# names with special characters survive intact.
		inotifywait -q -r -e modify,attrib,close_write,move,create,delete --exclude ".grive_state|.grive" "${_directory}" > /dev/null 2>&1 && "${SCRIPT}" sync "$(systemd-escape "${_directory}")"
		#echo ${SCRIPT} "${_directory}"
	done

	# always exit ok, so that we never go into a wrong systemd state
	exit 0
}
|
||||||
|
|
||||||
|
# Dispatch to the requested mode; both functions exit by themselves.
case "${COMMAND}" in
	listen)
		listen_directory "${DIRECTORY}"
		;;
	*)
		sync_directory "${DIRECTORY}"
		;;
esac

# always exit ok, so that we never go into a wrong systemd state
exit 0
|
|
@ -0,0 +1,6 @@
|
||||||
|
# One-shot sync pass, started by grive-timer@.timer.
# %i is the systemd-escaped directory to sync, relative to the home dir.
[Unit]
Description=Google drive sync (executed by timer unit)
# After= only orders this unit against the target; Wants= is required
# to actually pull network-online.target in (systemd.unit(5)).
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=@GRIVE_SYNC_SH_BINARY@ sync "%i"
|
|
@ -0,0 +1,11 @@
|
||||||
|
# Periodically triggers grive-timer@%i.service.
[Unit]
Description=Google drive sync (fixed intervals)

[Timer]
# Wall-clock schedule: at every minute divisible by 5 ...
OnCalendar=*:0/5
# ... plus monotonic triggers: 3 min after boot and 5 min after the
# last activation of the service.
OnBootSec=3min
OnUnitActiveSec=5min
Unit=grive-timer@%i.service

[Install]
WantedBy=timers.target
|
|
@ -0,0 +1,13 @@
|
||||||
|
# Umbrella unit: enabling grive@<dir> pulls in both the periodic timer
# and the change listener for that directory.
[Unit]
Description=Google drive sync (main)
Requires=grive-timer@%i.timer grive-changes@%i.service

# dummy service
[Service]
Type=oneshot
ExecStart=/bin/true
# This service shall be considered active after start
RemainAfterExit=yes

[Install]
WantedBy=default.target
|
Loading…
Reference in New Issue