diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0039143 --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +.idea/ +*.iml +.*.swp +.DS_Store +target/ +/vertx/public/dataviewer/node_modules +/vertx/public/dataviewer/ui/bundle.js +/vertx/public/dataviewer/ui/bundle.css +/vertx/public/dataviewer/ui/*.png +/vertx/public/dataviewer/ui/*.gif +/vertx/public/dataviewer/lib/csync/node_modules diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..83b3a03 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,14 @@ +language: scala +scala: + - 2.12.1 +services: + - rabbitmq + - postgresql +jdk: + - oraclejdk8 +branches: + only: + - master + - develop +addons: + postgresql: "9.5" \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..72e2f5d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,77 @@ +## Coding guidelines + +Contributions to the CSync Server should use the good Scala coding style. The project is set up so that developers can use [ScalaStyle][scalastyle] to check for common violations of proper Scala coding style. ScalaStyle is set up to automatically run when the tests are run. + +[scalastyle]: http://www.scalastyle.org/ + +## Documentation + +All code changes should include comments describing the design, assumptions, dependencies, and non-obvious aspects of the implementation. +Hopefully the existing code provides a good example of appropriate code comments. +If necessary, make the appropriate updates in the README.md and other documentation files. + +## Contributing your changes + +1. If one does not exist already, open an issue that your contribution is going to resolve or fix. + 1. Make sure to give the issue a clear title and a very focused description. +2. On the issue page, set the appropriate Pipeline, Label(s), Milestone, and assign the issue to +yourself. + 1. We use Zenhub to organize our issues and plan our releases. 
Giving as much information as to + what your changes are help us organize PRs and streamline the committing process. +3. Make a branch from the develop branch using the following naming convention: + 1. `YOUR_INITIALS/ISSUE#-DESCRIPTIVE-NAME` + 2. For example, `kb/94-create-contributingmd` was the branch that had the commit containing this + tutorial. +4. Commit your changes! +5. When you have completed making all your changes, create a Pull Request (PR) from your git manager +or our Github repo. +6. In the comment for the PR write `Resolves #___` and fill the blank with the issue number you +created earlier. + 1. For example, the comment we wrote for the PR with this tutorial was `Resolves #94` +7. That's it, thanks for the contribution! + +## Setting up your environment + +You have probably got most of these set up already, but starting from scratch you will need the following: + + * SBT + * PostgreSQL + * RabbitMQ + +1. First, install SBT using `brew install sbt` + +2. Install RabbitMQ using `brew install rabbitmq` + +3. Install PostGresql using `brew install postgres` + +4. Start up RabbitMQ by running `brew services start rabbitmq` + +5. Start up PostgreSQL by running `brew services start postgres` + +6. Run `createdb` to create the PostgreSQL database for csync to use + +7. You can stop RabbitMQ or PostgreSQL at any time by running `brew services stop (rabbitmq or postgres)` + +## Running the tests + +From the command line, run `sbt test` to run the tests and ScalaStyle checks + +To generate code coverage, run `sbt clean coverage test` + +To generate a human readable report, run `sbt coverageReport`. This will appear in the `core/target/{scalaversion}/scoverage-report` folder. + +# Dependency Table + +| Name | Version |Author |License | Release Date | Verification Code | URL | +|--------------|---------|---------|--------|--------------|-------------------|-----| +| vert.x framework
- vertx-core_3.3.0
- vertx-codegen_3.3.0
- | 3.3.0 | | Apache 2.0 | n/a | n/a | +| scala-logging | 3.4.0 | | Apache 2.0 | n/a | n/a | com.typesafe.scala-logging:scala-logging | +| amqp-client | 3.6.2 | Pivotal | Apache 2.0 | n/a | n/a | com.rabbitmq:amqp-client | +| postgresql | 9.4-1208-jdbc41 | | PostgreSQL License | n/a | n/a | org.postgresql:postgresql | +| google-api-client| 1.22.0 | Google | Apache 2.0 | n/a | n/a | com.google.api-client:google-api-client | +| HikariCP | 2.4.6 | Brett Wooldridge| Apache 2.0 | n/a | n/a | com.zaxxer:HikariCP | +| scalaj-http | 2.3.0 | Jon Hoffman | Apache 2.0 | n/a | n/a | org.scalaj:scalaj-http | +| slf4j-api | 1.7.21 | | MIT License | n/a | n/a | org.slf4j:slf4j-api | +| slf4j-simple | 1.7.21 | | MIT License | n/a | n/a | org.slf4j:slf4j-simple | +| boopickle | 1.2.5 | Otto Chrons | MIT License| n/a | n/a | | + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d9a10c0 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..d5ff276 --- /dev/null +++ b/Makefile @@ -0,0 +1,19 @@ +TARGETDIR=target/docker +TARGET=stage +IMAGE_NAME=csync + +all : sbt_compile; + +sbt_% : + ../sbt $* + +% : sbt_%; + +image: + @ echo "" + @ echo "Creating all in one docker image (will take a few minutes). 
*** Ctrl-C NOW to abort *** " + @ sleep 7 + @ echo "Running sbt clean docker:stage" + ./npmgulp.sh + @ sbt clean clean-files docker:stage + @ docker build --no-cache=true -t ${IMAGE_NAME} ${TARGETDIR}/${TARGET} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..6e17ac6 --- /dev/null +++ b/README.md @@ -0,0 +1,118 @@ +# Contextual Sync + +Contextual Sync (CSync) is an open source, real-time, continuous data synchronization service for building modern applications. The CSync data store is organized with key/values where keys have a hierarchical structure. Clients can obtain the current value for a key and any subsequent updates by listening on the key. Updates are delivered to all online clients in near-real time. Clients can also listen on a key pattern where some components contain wildcards. + +## Keys +CSync is structured as a tree of nodes referenced by period-delimited strings called keys. + +To illustrate : + +``` + companies + / \ + ibm google + / \ / \ +stock offices stock offices +``` + +The above tree consists of the following keys : `companies`, `companies.ibm`, `companies.google`, `companies.ibm.stock`, `companies.ibm.offices`, `companies.google.stock`, `companies.google.offices`. Any one of these keys can be listened to at a time and all changes to that singular node will be synced to the client device. + +### Key Limitations +Keys can have a maximum of 16 parts and a total length of 200 characters. Key components may contain only uppercase and lowercase alphabetic, numeric, "_", and "-". + +Valid key: `this.is.a.valid.key.123456.7.8.9.10` + +Invalid key: `this is an.invalidkey.🍕.4.5.6.7.8.9.10.11.12.13.14.15.16.17.18` + +### Wildcards in Keys +Suppose a developer wishes to listen to a subset of the tree containing multiple nodes, CSync provides this ability through wildcards. Currently CSync supports `*` and `#` as wildcards. 
+ +#### Asterisk Wildcard +An asterisk (`*`) wildcard will match any value in the part of the key where the wildcard is. As an example, if a developer listens to `companies.*.stock` in the above tree, the client will sync with all stock nodes for all companies. + +#### Hash Wildcard +If a developer wishes to listen to all child nodes in a subset of the tree, the `#` can be appended to the end of a key and the client will sync with all child nodes of the specified key. For instance, in the above tree, if a user listens to `companies.ibm.#`, then the client will sync with all child nodes of `companies.ibm` which include `companies.ibm.stock` and `companies.ibm.offices`. + +**Note:** Each listen is independent. For example, if a developer listens to both `companies.*.stock` and `companies.companyX.stock`, the data from `companies.companyX.stock` will be received by both of the listeners. + +## Guaranteed Relevance +Only the latest, most recent values sync, so you’re never left with old data. CSync provides a consistent view of the values for keys in the CSync store. If no updates are made to a key for a long enough period of time, all subscribers to the key will see the same consistent value. CSync guarantees that the latest update will be reflected at all connected, subscribed clients, but not that all updates to a key will be delivered. Clients will not receive an older value than what they have already received for a given key. + +## Local Storage +Work offline, read and write, and have data automatically sync the next time you’re connected. CSync maintains a local cache of data that is available to the client even when the client is offline or otherwise not connected to the CSync service. The client may perform listens, writes, and deletes on the local store while offline. When the client reestablishes connectivity to the CSync service, the local cache is efficiently synchronized with the latest data from the CSync store. 
The local cache is persistent across application restarts and device reboots. + +## Access Controls +Use simple access controls to clearly state who can read and write, keeping your data safe. Each key in the CSync store has an associated access control list (ACL) that specifies which users can access the key. + +Three specific forms of access are defined: +- Create: Users with create permission may create child keys of this key. +- Read: Users with read permission may read the data for the key. +- Write: Users with write permission may write the data for the key. + +The creator of a key in the CSync store has special permissions to that key. In particular, the creator always has Read, Write, and Create permissions, and they also have permission to delete the key and change its ACL. + +CSync provides eight "static" ACLs that can be used to provide any combination of Read, Write, and Create access to just the key's creator or all users. +- Private +- PublicRead +- PublicWrite +- PublicCreate +- PublicReadWrite +- PublicReadCreate +- PublicWriteCreate +- PublicReadWriteCreate + +The ACL for a key is set when the key is created by the first write performed to the key. If the write operation specified an ACL, then this ACL is attached to the key. If no ACL was specified in the write, then the key inherits the ACL from its closest ancestor in the key space—its parent if the parent exists, else its grandparent if that key exists, possibly all the way back to the root key. The ACL of the root key is `PublicCreate`, which permits any user to create a child key but does not allow public read or write access. + +# Getting Started + +1. Download and install [Docker], gulp and sbt: + - docker https://www.docker.com/products/overview + - gulp `npm install --global gulp-cli` + - sbt `brew install sbt` + +2. Clone this Repo + +3. Make csync image from the root folder: + + - `make image` + +4. 
Run the csync image: + + - `docker run -d -p 6005:6005 csync` + + To enable **Google Authentication** add in an environment variable like so: + + - `docker run -d -p 6005:6005 -e CSYNC_GOOGLE_CLIENT_IDS="CLIENTID HERE" csync` + + To enable **GitHub Authentication** add in a client ID and client secret: + + - `docker run -d -p 6005:6005 -e CSYNC_GITHUB_ID githubIdHere -e CSYNC_GITHUB_SECRET githubSecretHere csync` + + Both authentication providers can be enabled at the same time by having all environment variables specified. + + Click [here](https://github.ibm.com/csync/csync-server/wiki/Create-a-CSync-Instance-on-Bluemix) for instructions to run CSync on Bluemix. + + Need to handle workloads larger than what's possible with a single instance? [Check this out](https://github.ibm.com/csync/csync-server/wiki/Using-external-PostgreSQL-and-RabbitMQ-instances) + +### Dataviewer + +When running a local CSync instance, the dataviewer can be accessed on `localhost:6005`. Currently the dataviewer supports Google Authentication and Guest Login. For details on how to use the dataviewer, check out the [README](https://github.ibm.com/csync/csync-server/blob/develop/vertx/public/dataviewer/README.md). + +NOTE: Chrome is the only supported browser at this time. Contributions to supporting other browsers are welcome. + +## Additional Commands +`docker ps` will list out running containers and the images within. + +`docker images` will list out available local docker images. + +In case you need a new image, you need to stop and delete the old one by running `docker kill <container ID>` and `docker rm <container ID>` + +## Troubleshooting +Having issues with the server? Check out the debugging article [here](https://github.ibm.com/csync/csync-server/wiki/Debugging-a-server-problem). + +# License +This library is licensed under Apache 2.0. Full license text is +available in [LICENSE](LICENSE). + +# Contribution Guide +Want to contribute? 
Take a look at our [CONTRIBUTING.md] (https://github.ibm.com/csync/csync-server/blob/develop/CONTRIBUTING.md) diff --git a/build.sbt b/build.sbt new file mode 100644 index 0000000..eb959de --- /dev/null +++ b/build.sbt @@ -0,0 +1,156 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import com.typesafe.sbt.packager.MappingsHelper._ +import com.typesafe.sbt.packager.docker.DockerPlugin.autoImport._ +import com.typesafe.sbt.packager.docker._ + +// Scalaiform auto code formatting settings +import scalariform.formatter.preferences._ +import com.typesafe.sbt.SbtScalariform +import com.typesafe.sbt.SbtScalariform.ScalariformKeys + +SbtScalariform.scalariformSettings + +name := "csync" +lazy val commonSettings = Seq( + version := "1.0.0", + scalaVersion := "2.12.1", + scalacOptions ++= Seq("-deprecation") /*, "-Xexperimental")*/ , + ScalariformKeys.preferences := ScalariformKeys.preferences.value + .setPreference(SpacesAroundMultiImports, false), + exportJars := true, + mappings in Universal += ((packageBin in Compile) map { jar => + jar -> ("lib/" + jar.getName) + }).value +) + +lazy val postgresDriver = "org.postgresql" % "postgresql" % "9.4.1208" +lazy val logging = "org.slf4j" % "slf4j-simple" % "1.7.21" +lazy val scalaTest = "org.scalatest" %% "scalatest" % "3.0.1" +lazy val scalaCheck = "org.scalacheck" %% "scalacheck" % "1.13.4" + +// Keeping the silly style thing happy +lazy val P9000 = 9000 +lazy val 
P9443 = 9443 + +lazy val server = (project in file(".")) + .enablePlugins(UniversalPlugin, JavaAppPackaging, DockerPlugin) + .settings(commonSettings: _*) + .aggregate(core, vertx).dependsOn(core, vertx) + .aggregate(core) + .settings( + mappings in Universal ++= directory("vertx/public"), + aggregate in Docker := false, + NativePackagerKeys.maintainer in Docker := "CSync", + NativePackagerKeys.dockerExposedPorts in Docker := Seq(P9000, P9443), + NativePackagerKeys.dockerBaseImage := "ibmcom/csync-base", + NativePackagerKeys.daemonUser in Docker := "postgres", + NativePackagerKeys.dockerCommands := dockerCommands.value.filterNot { + // ExecCmd is a case class, and args is a varargs variable, so you need to bind it with @ + // case ExecCmd("USER", args @ _*) => true + case Cmd("USER", arg) => true + case ExecCmd("ENTRYPOINT", args@_*) => true + // don't filter the rest + case _ => false + }, + + NativePackagerKeys.dockerCommands ++= Seq( + Cmd("RUN", "echo 'net.ipv4.icmp_echo_ignore_broadcasts = 1'>>/etc/sysctl.conf&&echo 'net.ipv4.tcp_syncookies = 1'>>/etc/sysctl.conf&&echo 'net.ipv4.ip_forward = 0'>>/etc/sysctl.conf"), + Cmd("RUN", "touch /var/log/wtmp /etc/security/opasswd &&chmod 664 /var/log/wtmp&&chmod 600 /etc/security/opasswd"), + Cmd("RUN", "sed -i.foo 's/.*PASS_MAX_DAYS.*$/PASS_MAX_DAYS 90/' /etc/login.defs"), + Cmd("RUN", "echo 'password requisite pam_cracklib.so retry=3 minlen=8' >> /etc/pam.d/common-password"), + Cmd("RUN", "sed -i.foo 's/.*PASS_MIN_DAYS.*$/PASS_MIN_DAYS 1/' /etc/login.defs"), + Cmd("RUN", "apt-key update && apt-get update && apt-get -y install apt-utils && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/*"), + ExecCmd("ENTRYPOINT", "/csync.sh"))) + +lazy val vertx = project.dependsOn(core) + .enablePlugins(UniversalPlugin, JavaAppPackaging, DockerPlugin) + .settings(commonSettings: _*) + .settings( + + mainClass := Some("com.ibm.csync.vertx.Main"), + + libraryDependencies ++= Seq( + + // Vertx + "io.vertx" % 
"vertx-core" % "3.3.0", + "io.vertx" % "vertx-codegen" % "3.3.0", + + logging, + postgresDriver, + "com.zaxxer" % "HikariCP" % "2.4.6", + "org.json4s" %% "json4s-native" % "3.5.0" + + ), + + libraryDependencies ++= Seq( + scalaTest, + scalaCheck + ) map { + _ % "test" + } + ) + +lazy val core = project + .settings(commonSettings: _*) + .enablePlugins(UniversalPlugin, JavaAppPackaging, DockerPlugin) + .settings( + + libraryDependencies ++= Seq( + + // Brilliant source info macros + "com.lihaoyi" %% "sourcecode" % "0.1.3", + + "me.chrons" %% "boopickle" % "1.2.5", + + // Rest client + "org.scalaj" %% "scalaj-http" % "2.3.0", + + // Logging + "org.slf4j" % "slf4j-api" % "1.7.21", + "com.typesafe.scala-logging" %% "scala-logging" % "3.5.0", + + // Rabbit + "com.rabbitmq" % "amqp-client" % "3.6.2", + + // google-api-client, version 1.22.0 + "com.google.api-client" % "google-api-client" % "1.22.0", + "com.google.api-client" % "google-api-client-gson" % "1.22.0" + + // avoid conflict + //xml + ), + + libraryDependencies ++= Seq( + postgresDriver, + logging, + scalaTest, + scalaCheck + ) map { + _ % "test" + } + ) + +lazy val testScalastyle = taskKey[Unit]("testScalastyle") +scalastyleSources in Test := Seq(file("core/")) +testScalastyle := org.scalastyle.sbt.ScalastylePlugin.scalastyle.in(Test).toTask("").value +(test in Test) := ((test in Test) dependsOn testScalastyle).value + +concurrentRestrictions in Global += Tags.limit(Tags.Test, 1) + +run in Compile := (run in Compile in vertx).evaluated +mainClass in Compile := (mainClass in Compile in vertx).value diff --git a/core/src/main/scala/com/ibm/csync/Utils.scala b/core/src/main/scala/com/ibm/csync/Utils.scala new file mode 100644 index 0000000..605ecd8 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/Utils.scala @@ -0,0 +1,55 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync + +import scala.annotation.tailrec +import scala.concurrent.Future +import scala.util.{Failure, Success, Try} + +object Utils { + + def using[T <: AutoCloseable, U](resource: T)(f: T => U): U = { + try { + f(resource) + } finally { + resource.close() + } + } + + def attempt[T](n: Int)(f: => Try[T]): Try[T] = { + @tailrec + def loop(count: Int, last: Option[Try[T]]): Try[T] = + if (count < 1) { + last.getOrElse(Failure(new IllegalArgumentException(s"n = $n"))) + } else { + f match { + case x @ Success(_) => x + case x @ Failure(_) => loop(count - 1, Some(x)) + } + } + + loop(n, None) + } + + def optionToFuture[T](option: Option[T], e: => Throwable): Future[T] = + option match { + case Some(data) => Future.successful(data) + case None => Future.failed(e) + } + +} + diff --git a/core/src/main/scala/com/ibm/csync/auth/demo/ValidateDemoToken.scala b/core/src/main/scala/com/ibm/csync/auth/demo/ValidateDemoToken.scala new file mode 100644 index 0000000..1e6186e --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/auth/demo/ValidateDemoToken.scala @@ -0,0 +1,35 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.auth.demo + +import com.ibm.csync.session.{Session, UserInfo} +import com.typesafe.scalalogging.LazyLogging + +object ValidateDemoToken extends LazyLogging { + + def validate(token: String): UserInfo = { + logger.info(s"[validateToken]: $token Validating demo id token representing user’s identity asserted by the identity provider") + + token match { + case Session.demoToken => UserInfo("demoUser") + case Session.userToken(user) => UserInfo(user) + case _ => + logger.debug(s"Token validation failed for token: $token") + throw new Exception("Cannot establish session. Token validation failed") + } + } +} diff --git a/core/src/main/scala/com/ibm/csync/auth/github/ValidateGitHubToken.scala b/core/src/main/scala/com/ibm/csync/auth/github/ValidateGitHubToken.scala new file mode 100644 index 0000000..3b22fe5 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/auth/github/ValidateGitHubToken.scala @@ -0,0 +1,56 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.ibm.csync.auth.github
+
+import com.google.api.client.json.JsonFactory
+import com.google.api.client.json.gson.GsonFactory
+import com.ibm.csync.session.UserInfo
+import com.typesafe.scalalogging.LazyLogging
+
+import scala.util.Try
+import scalaj.http.{Http, HttpResponse}
+
+/**
+ * Validates a GitHub OAuth access token against GitHub's application token
+ * check endpoint and derives the CSync user identity from the account id
+ * found in the response.
+ */
+object ValidateGitHubToken extends LazyLogging {
+
+  val githubClientId = sys.env.getOrElse("CSYNC_GITHUB_ID", "")
+  val githubClientSecret = sys.env.getOrElse("CSYNC_GITHUB_SECRET", "")
+
+  val jsonFactory: JsonFactory = new GsonFactory()
+
+  /**
+   * Checks `token` with GitHub using basic auth (client id / client secret).
+   *
+   * @param token the OAuth access token presented by the client
+   * @return UserInfo whose id is "github:&lt;GitHub account id&gt;"
+   * @throws Exception when GitHub rejects the token, the HTTP call fails,
+   *                   or the response body contains no "id" field
+   */
+  def validate(token: String): UserInfo = {
+    logger.info(s"[validateToken]: $token Validating github id token representing user’s identity asserted by the identity provider")
+
+    val url = s"https://api.github.com/applications/${githubClientId}/tokens/$token"
+
+    val response: Try[HttpResponse[String]] = Try(Http(url).auth(githubClientId, githubClientSecret).asString)
+
+    if (response.isFailure || response.get.code != 200) {
+      logger.info(s"[validateGitHubToken]: Token validation failed for token: ${token}")
+      throw new Exception("Cannot establish session. Token validation failed")
+    }
+
+    val data = response.get.body
+    // Extract the numeric value of the "id" field from the JSON body.
+    // The value starts after the ':' that follows the "id" key and ends at the
+    // next ',' or '}'. The previous code used indexOf(':', start) as the
+    // terminator, which is the delimiter of the *following* field and made the
+    // extracted id carry trailing garbage such as `, "url"`.
+    val idIndex = data.indexOfSlice("\"id\"")
+    if (idIndex < 0) {
+      // Guard against a 200 response with no id field; previously this would
+      // have silently produced a garbage slice of the body.
+      logger.info(s"[validateGitHubToken]: Token validation response contained no id for token: ${token}")
+      throw new Exception("Cannot establish session. Token validation failed")
+    }
+    val start = data.indexOf(':', idIndex) + 1
+    val end = (Seq(data.indexOf(',', start), data.indexOf('}', start)).filter(_ >= 0) :+ data.length).min
+    val id = data.slice(start, end).trim
+
+    val authenticatorId = s"github:${id}"
+
+    logger.debug(s"[validateToken]: Validated id token. Contains authenticatorid $authenticatorId")
+    UserInfo(authenticatorId)
+  }
+}
diff --git a/core/src/main/scala/com/ibm/csync/auth/google/ValidateGoogleToken.scala b/core/src/main/scala/com/ibm/csync/auth/google/ValidateGoogleToken.scala
new file mode 100644
index 0000000..e37d34e
--- /dev/null
+++ b/core/src/main/scala/com/ibm/csync/auth/google/ValidateGoogleToken.scala
@@ -0,0 +1,69 @@
+/*
+ * Copyright IBM Corporation 2016-2017
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.auth.google + +import com.google.api.client.googleapis.auth.oauth2.GoogleIdTokenVerifier +import com.google.api.client.http.HttpTransport +import com.google.api.client.http.javanet.NetHttpTransport +import com.google.api.client.json.JsonFactory +import com.google.api.client.json.gson.GsonFactory +import com.ibm.csync.session.UserInfo +import com.typesafe.scalalogging.LazyLogging + +object ValidateGoogleToken extends LazyLogging { + + private val googleClientId = java.util.Arrays.asList(sys.env.getOrElse("CSYNC_GOOGLE_CLIENT_IDS", "")) + + val googleIssuer = "accounts.google.com" + val googlePlayIssuer = "https://accounts.google.com" + + val jsonFactory: JsonFactory = new GsonFactory() + val transport: HttpTransport = new NetHttpTransport() + + val googleVerifier: GoogleIdTokenVerifier = new GoogleIdTokenVerifier.Builder(transport, jsonFactory) + .setAudience(googleClientId) + .setIssuer(googleIssuer) + .build() + + val googlePlayVerifier: GoogleIdTokenVerifier = new GoogleIdTokenVerifier.Builder(transport, jsonFactory) + .setAudience(googleClientId) + .setIssuer(googlePlayIssuer) + .build() + + def validate(token: String): Option[UserInfo] = { + logger.info(s"[validateToken]: $token Validating google id token representing user’s identity asserted by the identity provider") + + // token validation + // Verify can throw or return null - we combine these into null to avoid duplicate error handling. 
+ Option(googleVerifier.verify(token)) orElse { + Option(googlePlayVerifier.verify(token)) + } map { idToken => + val payload = idToken.getPayload + val expires = payload.getExpirationTimeSeconds + if ((expires * 1000) < System.currentTimeMillis()) { + throw new Exception("Cannot establish session. Token validation failed -- token expired.") + } + val authenticatorId = s"${payload.getIssuer}:${payload.getSubject}" + val email: Option[String] = Option(payload.get("email")) map { + _.toString() + } + + logger.debug(s"[validateToken]: Validated id token. Contains authenticatorid $authenticatorId and email $email") + UserInfo(authenticatorId, expires) + } + } +} diff --git a/core/src/main/scala/com/ibm/csync/commands/Advance.scala b/core/src/main/scala/com/ibm/csync/commands/Advance.scala new file mode 100644 index 0000000..9333fff --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/Advance.scala @@ -0,0 +1,85 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.commands + +import java.sql.Connection + +import com.ibm.csync.database._ +import com.ibm.csync.session.Session +import com.ibm.csync.types.Pattern + +case class Advance(rvts: Long, pattern: Seq[String]) extends Command { + + override def doit(us: Session): AdvanceResponse = { + us.transaction { sqlConnection => + val limit = 10 + val (patternWhere, patternVals) = Pattern(pattern).asWhere + val acls = getAcls(sqlConnection, us.userInfo) + val aclWhere = List.fill(acls.length)("?").mkString(",") + val queryVals1 = Seq(rvts) ++ patternVals ++ acls ++ Seq(us.userInfo.userId, limit) + + var maxVts = SqlStatement.queryResult( + sqlConnection, + "SELECT last_value FROM latest_vts_seq", + Seq() + ) { rs => rs.getLong("last_value") }.head + + val rs1: Seq[Long] = getFromLatest(sqlConnection, patternWhere, aclWhere, queryVals1) + if (rs1.length == limit) { + maxVts = rs1.last + } + + val queryVals2 = Seq(rvts, maxVts) ++ patternVals ++ acls ++ Seq(us.userInfo.userId, limit) + val rs2: Seq[Long] = getFromAttic(sqlConnection, patternWhere, aclWhere, queryVals2) + if (rs2.length == limit) { + maxVts = rs2.last + AdvanceResponse(rs1.filter(_ < maxVts) ++ rs2, maxVts) + } else { + AdvanceResponse(rs1 ++ rs2, maxVts) + } + } + } + + def addTerm(op: String, term: String): String = { + if (term.length > 0) { s"$op $term" } else { "" } + } + + private def getFromAttic(sqlConnection: Connection, patternWhere: String, aclWhere: String, queryVals2: Seq[Any]) = + SqlStatement.queryResult( + sqlConnection, + s""" + SELECT vts FROM attic WHERE vts > ? AND vts < ? + ${addTerm("AND", patternWhere)} + AND (aclid IN ($aclWhere) OR creatorid = ?) + ORDER BY vts LIMIT ? + """, + queryVals2 + ) { rs => rs.getLong("vts") } + + private def getFromLatest(sqlConnection: Connection, patternWhere: String, aclWhere: String, queryVals1: Seq[Any]) = + SqlStatement.queryResult( + sqlConnection, + s""" + SELECT vts FROM latest WHERE vts > ? 
+ ${addTerm("AND", patternWhere)} + AND (aclid IN ($aclWhere) OR creatorid = ?) + ORDER BY vts LIMIT ? + """, + queryVals1 + ) { rs => rs.getLong("vts") } + +} diff --git a/core/src/main/scala/com/ibm/csync/commands/Fetch.scala b/core/src/main/scala/com/ibm/csync/commands/Fetch.scala new file mode 100644 index 0000000..ee7386f --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/Fetch.scala @@ -0,0 +1,76 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.commands + +import com.ibm.csync.database._ +import com.ibm.csync.session.Session + +import scala.collection.mutable + +case class Fetch(vts: Seq[Long]) extends Command { + + //override def shortString: String = s"$vts" + + val FETCH_GROUP_SIZE = 10 + + override def doit(us: Session): FetchResponse = { + val updates = mutable.ArrayBuffer[Data]() + + us.transaction { sqlConnection => + val acls = com.ibm.csync.commands.getAcls(sqlConnection, us.userInfo) + val aclWhere = List.fill(acls.length)("?").mkString(",") + + // sort vts list (will most likely be ordered, but just to be sure) + vts.sorted.grouped(FETCH_GROUP_SIZE).toList.foreach { vtsChunk => + val vtsWhere = List.fill(vtsChunk.length)("?").mkString(",") + val queryVals = acls ++ Seq(us.userInfo.userId) ++ vtsChunk + updates ++= SqlStatement.queryResult( + sqlConnection, + s""" + SELECT vts,cts,key,aclid,creatorid,isDeleted,data FROM latest + WHERE (aclid IN ($aclWhere) OR creatorid = ?) + AND vts IN ($vtsWhere) + """, + queryVals + ) { rs => + Data( + vts = rs.getLong("vts"), cts = rs.getLong("cts"), path = rs.getString("key").split('.'), + acl = rs.getString("aclid"), creator = rs.getString("creatorid"), deletePath = rs.getBoolean("isdeleted"), + data = Option(rs.getString("data")) + ) + } + + updates ++= SqlStatement.queryResult( + sqlConnection, + s""" + SELECT vts,key,aclid,creatorid FROM attic + WHERE (aclid IN ($aclWhere) OR creatorid = ?) 
+ AND vts IN ($vtsWhere) + """, + queryVals + ) { rs => + Data( + vts = rs.getLong("vts"), cts = 0, path = rs.getString("key").split('.'), + acl = rs.getString("aclid"), creator = rs.getString("creatorid"), deletePath = true, data = None + ) + } + } + } + FetchResponse(updates) + } + +} diff --git a/core/src/main/scala/com/ibm/csync/commands/GetAcls.scala b/core/src/main/scala/com/ibm/csync/commands/GetAcls.scala new file mode 100644 index 0000000..1c49373 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/GetAcls.scala @@ -0,0 +1,29 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.commands + +import com.ibm.csync.session.Session + +case class GetAcls(ignore: Option[String]) extends Command { + + override def doit(us: Session): GetAclsResponse = { + us.transaction { sqlConnection => + val acls = getAcls(sqlConnection, us.userInfo) + GetAclsResponse(acls) + } + } +} diff --git a/core/src/main/scala/com/ibm/csync/commands/Pub.scala b/core/src/main/scala/com/ibm/csync/commands/Pub.scala new file mode 100644 index 0000000..31d3e96 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/Pub.scala @@ -0,0 +1,382 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.commands + +import java.sql.{Connection, ResultSet} + +import com.ibm.csync.database.SqlStatement +import com.ibm.csync.session.Session +import com.ibm.csync.types._ +import com.ibm.csync.types.ResponseCode._ + +import scala.collection.mutable + +class PubState(sqlConnection: Connection, req: Pub, us: Session) { + + private[commands] val updates = mutable.ArrayBuffer[Data]() + private val pubKey = Key(req.path) + private val pubData = req.data + private val creatorId = CreatorId(us.userInfo.userId) + private val pubAcl = req.assumeACL map { ACL(_, creatorId) } + + def delete(): VTS = { + SqlStatement.runQuery( + sqlConnection, + "SELECT vts,cts,aclid,creatorid FROM latest WHERE key = ? AND isDeleted = false FOR UPDATE", + Seq(pubKey.asString) + ) { rs => + if (rs.next) { + val oldVts = rs.getLong("vts") + val oldCts = rs.getLong("cts") + val oldCreator = CreatorId(rs.getString("creatorid")) + val oldAcl = ACL(rs.getString("aclid"), oldCreator) + + if (req.cts <= oldCts) PubCtsCheckFailed.throwIt() + + oldAcl.checkDelete(sqlConnection, us.userInfo) + + val newVts = SqlStatement.updateGetVts( + sqlConnection, + "UPDATE latest SET vts=default, cts = ?, isDeleted = true, data = null WHERE vts = ? RETURNING vts", + Seq(req.cts, oldVts) + ) + + updates += Data( + vts = newVts.vts, + cts = req.cts, + acl = oldAcl.id, + creator = oldCreator.id, + path = pubKey.asStrings, + deletePath = true, + data = None + ) + newVts + } else { + /* TODO: Is this really needed? 
*/ + CannotDeleteNonExistingPath.throwIt() + } + } + } + + // + // Create entry in database + // Precondition: no entry for that key in latest table + // + def create(): VTS = { + val effectiveParentAcl = getEffectiveAcl(pubKey.parent) + effectiveParentAcl.checkCreate(sqlConnection, us.userInfo) + val newAcl = pubAcl.getOrElse(ACL(effectiveParentAcl.id, creatorId)) + // TODO: we don't need this + SqlStatement.runUpdate( + sqlConnection, + "DELETE FROM attic WHERE key = ? AND aclid = ? AND creatorid = ?", + Seq(pubKey.asString, newAcl.id, creatorId.id) + ) { n => assert(n <= 1) } + val newVts = SqlStatement.updateGetVts( + sqlConnection, + s""" + INSERT INTO latest (cts,aclid,creatorid,key,isDeleted,data${dbNames(pubKey)}) + VALUES (?,?,?,?,false,?${dbVals(pubKey)}) RETURNING vts + """, + Seq(req.cts, newAcl.id, creatorId.id, pubKey.asString, pubData.orNull) ++ (pubKey.parts map { _.asString }) + ) + + updates += Data( + vts = newVts.vts, + cts = req.cts, + acl = newAcl.id, + creator = creatorId.id, + deletePath = false, + path = pubKey.asStrings, + data = pubData + ) + + newVts + } + + def getEffectiveAcl(key: Key): ACL = { + if (key.parts.isEmpty) { + ACL("$publicCreate", CreatorId("$publicUser")) + } else { + SqlStatement.runQuery( + sqlConnection, + "SELECT aclid,creatorid,isDeleted,vts FROM latest WHERE key = ?", + Seq(key.asString) + ) { rs => + if (rs.next) { + if (rs.getBoolean("isDeleted")) { + getEffectiveAcl(key.parent) + } else { + ACL(rs.getString("aclid"), CreatorId(rs.getString("creatorid"))) + } + } else { + getEffectiveAcl(key.parent) + } + } + } + } + + def createOrUpdate(): VTS = { + + SqlStatement.runQuery( + sqlConnection, + "SELECT vts,cts,aclid,creatorid,isDeleted FROM latest WHERE key = ? 
FOR UPDATE", + Seq(pubKey.asString) + ) { rs => + if (rs.next) { + doUpdate(rs) + } else { + create() + } + } + } + + private def doUpdate(rs: ResultSet) = { + val oldVts = VTS(rs.getLong("vts")) + val oldCts = rs.getLong("cts") + val oldIsDeleted = rs.getBoolean("isDeleted") + val oldCreatorId = CreatorId(rs.getString("creatorid")) + val oldAcl = ACL(rs.getString("aclid"), oldCreatorId) + + if (req.cts <= oldCts) PubCtsCheckFailed.throwIt() + + val newAclId = req.assumeACL.getOrElse(oldAcl.id) + + if (oldIsDeleted) { + doUpdateDeleted(oldVts, oldCreatorId, oldAcl, newAclId) + } else { + doUpdateInPlace(oldVts, oldCreatorId, oldAcl, newAclId) + } + } + + private def doUpdateInPlace(oldVts: VTS, oldCreatorId: CreatorId, oldAcl: ACL, newAclId: String) = { + oldAcl.checkUpdate(sqlConnection, us.userInfo) + if (oldAcl.id != newAclId || oldCreatorId.id != creatorId.id) { + val keys = Seq(pubKey.asString, oldAcl.id, oldCreatorId.id) + // changing ACL, make it look like we're deleting the old record + val deleteVts = SqlStatement.updateGetVts( + sqlConnection, + s"""INSERT INTO attic (vts,key,aclid,creatorid${dbNames(pubKey)}) + VALUES (nextval('latest_vts_seq'),?,?,?${dbVals(pubKey)}) + ON CONFLICT (key,aclid,creatorid) DO + UPDATE SET vts = nextval('latest_vts_seq') + WHERE attic.key = ? and attic.aclid = ? and attic.creatorid = ? + RETURNING vts""", + keys ++ (pubKey.parts map { _.asString }) ++ keys + ) + updates += Data( + cts = req.cts, + vts = deleteVts.vts, + deletePath = true, + acl = oldAcl.id, + creator = oldCreatorId.id, + path = pubKey.asStrings, + data = None + ) + } + // TODO: Need to decide how to interpret data=None on a pub + val newVts = pubData match { + case Some(stuff) => + SqlStatement.updateGetVts( + sqlConnection, + "UPDATE latest SET vts=default, cts = ?, aclid = ?, data = ? WHERE vts = ? 
RETURNING vts", + Seq(req.cts, newAclId, stuff, oldVts.vts) + ) + case None => + SqlStatement.updateGetVts( + sqlConnection, + "UPDATE latest SET vts=default, cts = ?, aclid = ? WHERE vts = ? RETURNING vts", + Seq(req.cts, newAclId, oldVts.vts) + ) + } + updates += Data( + cts = req.cts, + vts = newVts.vts, + deletePath = false, + acl = newAclId, + creator = oldCreatorId.id, + path = pubKey.asStrings, + data = pubData + ) + newVts + } + + private def doUpdateDeleted(oldVts: VTS, oldCreatorId: CreatorId, oldAcl: ACL, newAclId: String) = { + // move deleted record to attic + SqlStatement.runUpdate1(sqlConnection, "DELETE FROM latest WHERE vts = ?", Seq(oldVts.vts)) + if (oldAcl.id != newAclId || oldCreatorId.id != creatorId.id) { + // We maintain the invariant that the same key+aclid+creatorid will never exist in both latest and attic, + // so this insert should never encounter a duplicate key exception + + val keys = Seq(pubKey.asString, oldAcl.id, oldCreatorId.id) + + SqlStatement.runUpdate1( + sqlConnection, + s"""INSERT INTO attic (vts,key,aclid,creatorid${dbNames(pubKey)}) + VALUES (?,?,?,?${dbVals(pubKey)}) + ON CONFLICT (key,aclid,creatorid) DO + UPDATE SET vts = ? WHERE attic.key = ? and attic.aclid = ? and attic.creatorid = ? + """, + Seq(oldVts.vts) ++ keys ++ pubKey.parts.map { _.asString } ++ Seq(oldVts.vts) ++ keys + + ) + } + create() + } +} + +case class Pub(cts: Long, path: Seq[String], data: Option[String], + deletePath: Boolean, assumeACL: Option[String], schema: Option[String]) extends Command { + + //override def shortString: String = s"${if (deletePath) "delete " else ""}${path.mkString(".")}:$assumeACL@$cts" + + def addUser(sqlConnection: Connection, userId: String, authenticatorId: String): Unit = { + SqlStatement.runUpdate( + sqlConnection, + "INSERT INTO users (userid,authenticatorid) VALUES (?,?) 
ON CONFLICT DO NOTHING", + Seq(userId, authenticatorId) + ) { n => assert(n <= 1) } + } + + def addGroup(sqlConnection: Connection, groupId: String): Unit = { + SqlStatement.runUpdate( + sqlConnection, + "INSERT INTO groups (groupid) VALUES (?) ON CONFLICT DO NOTHING", + Seq(groupId) + ) { n => assert(n <= 1) } + } + + def addMembership(sqlConnection: Connection, groupId: String, userId: String): Unit = { + SqlStatement.runUpdate( + sqlConnection, + "INSERT INTO membership (groupid,userid) VALUES (?,?) ON CONFLICT DO NOTHING", + Seq(groupId, userId) + ) { n => assert(n <= 1) } + } + + override def doit(us: Session): PubResponse = { + + // Invariants (guarded by transactions): + // - a given key,acl,creatorid combination will never be present in both latest and attic + // - for a given key, the entry in latest will have the largest VTS + // + + val aclToReaders = mutable.Map[ACL, Seq[String]]() + + val (dbOutcome, newVts) = us.transaction { implicit sqlConnection => + val state = new PubState(sqlConnection, this, us) + + // perform the operation + val vts = if (deletePath) { + state.delete() + } else { + state.createOrUpdate() + } + + // perform any side-effects + val steps = state.updates.toList + + doSideEffects(sqlConnection, aclToReaders, steps) + + (steps, vts) + } + + for (r <- dbOutcome) { + import boopickle.Default._ + val acl = ACL(r.acl, CreatorId(r.creator)) + val data = Pickle.intoBytes(r).array() + val targetGroups = aclToReaders(acl) + + targetGroups foreach { us.send(_, r.path, data) } + } + + PubResponse(0, "OK", cts, newVts.vts) + } + + private def doSideEffects( + sqlConnection: Connection, + aclToReaders: mutable.Map[ACL, Seq[String]], steps: List[Data] + ) = { + for (s <- steps) { + val creator = CreatorId(s.creator) + val a = ACL(s.acl, creator) + + val groups = aclToReaders.get(a) match { + case Some(g) => + g + case None => + val t = a.getReadGroups(sqlConnection) + aclToReaders(a) = t + t + } + + s.path match { + case Seq("sys", "acls", 
aclId, permission, groupId) =>
+          doAclUpdate(sqlConnection, s, aclId, permission, groupId)
+        case Seq("sys", "users", userId) =>
+          doUserUpdate(sqlConnection, s, userId)
+        case Seq("sys", "groups", groupId) =>
+          doGroupUpdate(sqlConnection, s, groupId)
+        case Seq("sys", "groups", groupId, "member", userId) =>
+          doMembershipUpdate(sqlConnection, s, groupId, userId)
+        case _ =>
+      }
+    }
+  }
+
+  // Mirrors a membership pub into the membership table.
+  // NOTE: deletePath is unimplemented (??? throws NotImplementedError at runtime).
+  private def doMembershipUpdate(sqlConnection: Connection, s: Data, groupId: String, userId: String) = {
+    if (s.deletePath) {
+      ???
+    } else {
+      addMembership(sqlConnection, groupId, userId)
+    }
+  }
+
+  // Mirrors a group pub into the groups table.
+  // NOTE: deletePath is unimplemented (??? throws NotImplementedError at runtime).
+  private def doGroupUpdate(sqlConnection: Connection, s: Data, groupId: String) = {
+    if (s.deletePath) {
+      ???
+    } else {
+      addGroup(sqlConnection, groupId)
+    }
+  }
+
+  // Mirrors a user pub: creates the user, a same-named group, and makes the
+  // user a member of that group.
+  // NOTE: deletePath is unimplemented (??? throws NotImplementedError at runtime).
+  private def doUserUpdate(sqlConnection: Connection, s: Data, userId: String) = {
+    if (s.deletePath) {
+      ???
+    } else {
+      addUser(sqlConnection, userId, "authenticator")
+      addGroup(sqlConnection, userId)
+      addMembership(sqlConnection, userId, userId)
+    }
+  }
+
+  // Mirrors an ACL pub into the acls table: removes the (aclid, acltype,
+  // groupid) row on delete, inserts it otherwise.
+  private def doAclUpdate(sqlConnection: Connection, s: Data, aclId: String, permission: String, groupId: String) = {
+    if (s.deletePath) {
+      SqlStatement.runUpdate(
+        sqlConnection,
+        // FIX: was "DELETE acls ..." — PostgreSQL requires "DELETE FROM <table>",
+        // so the delete side-effect always failed with a syntax error.
+        "DELETE FROM acls WHERE aclid = ? AND acltype = ? AND groupid = ?",
+        Seq(aclId, permission, groupId)
+      ) { n => assert(n <= 1) }
+    } else {
+      SqlStatement.runUpdate(
+        sqlConnection,
+        "INSERT INTO acls (aclid,acltype,groupid) VALUES (?,?,?) 
ON CONFLICT DO NOTHING", + Seq(aclId, permission, groupId) + ) { n => assert(n <= 1) } + } + } +} diff --git a/core/src/main/scala/com/ibm/csync/commands/Response.scala b/core/src/main/scala/com/ibm/csync/commands/Response.scala new file mode 100644 index 0000000..457b6af --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/Response.scala @@ -0,0 +1,49 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.commands + +import com.ibm.csync.types.ResponseCode + +sealed trait Response { + def kind: String +} + +case class AdvanceResponse(vts: Seq[Long], maxvts: Long) extends Response { + override def kind: String = "advanceResponse" +} +case class ConnectResponse(uuid: String, uid: String, expires: Long) extends Response { + override def kind: String = "connectResponse" +} +case class Data(path: Seq[String], data: Option[String], deletePath: Boolean, + acl: String, creator: String, cts: Long, vts: Long) extends Response { + override def kind: String = "data" +} +case class Err(msg: String, cause: Option[String]) extends Response { + override def kind: String = "error" +} +case class FetchResponse(response: Seq[Data]) extends Response { + override def kind: String = "fetchResponse" +} +case class GetAclsResponse(acls: Seq[String]) extends Response { + override def kind: String = "getAclsResponse" +} +case class Happy(code: Int, msg: String) extends Response { + override def kind: 
String = "happy" +} +case class PubResponse(code: Int, msg: String, cts: Long, vts: Long) extends Response { + override def kind: String = "happy" // TODO: fix this +} diff --git a/core/src/main/scala/com/ibm/csync/commands/Sub.scala b/core/src/main/scala/com/ibm/csync/commands/Sub.scala new file mode 100644 index 0000000..b32abbd --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/Sub.scala @@ -0,0 +1,28 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.commands + +import com.ibm.csync.session.Session +import com.ibm.csync.types.Pattern +import com.ibm.csync.types.ResponseCode.OK + +case class Sub(path: Seq[String]) extends Command { + override def doit(us: Session): Response = { + us.subscribe(Pattern(path)) + Happy(OK.id, OK.name) + } +} diff --git a/core/src/main/scala/com/ibm/csync/commands/Unsub.scala b/core/src/main/scala/com/ibm/csync/commands/Unsub.scala new file mode 100644 index 0000000..de8e61b --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/Unsub.scala @@ -0,0 +1,28 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.commands + +import com.ibm.csync.session.Session +import com.ibm.csync.types.Pattern +import com.ibm.csync.types.ResponseCode.OK + +case class Unsub(path: Seq[String]) extends Command { + override def doit(us: Session): Response = { + us.unsubscribe(Pattern(path)) + Happy(OK.id, OK.name) + } +} diff --git a/core/src/main/scala/com/ibm/csync/commands/package.scala b/core/src/main/scala/com/ibm/csync/commands/package.scala new file mode 100644 index 0000000..db21598 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/commands/package.scala @@ -0,0 +1,55 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.ibm.csync
+
+import java.sql.Connection
+
+import com.ibm.csync.database.SqlStatement
+import com.ibm.csync.session.{Session, UserInfo}
+import com.ibm.csync.types.{ACL, Key}
+
+/** Shared definitions for the CSync command handlers. */
+package object commands {
+
+  /** A client request; doit executes it against the caller's session and returns the wire response. */
+  trait Command {
+    def doit(us: Session): Response
+    //def shortString: String
+  }
+
+  // Wire-protocol version number exchanged with clients.
+  val MESSAGE_VERSION = 15
+
+  /**
+   * Column-name suffix for SQL that addresses the per-component key columns,
+   * e.g. ",key0,key1" for a two-part key. Pairs with dbVals below.
+   */
+  def dbNames(key: Key): String = {
+    (key.parts.indices map { i => s",key$i" }).mkString
+  }
+
+  /** Matching placeholder suffix (",?" per key component) for dbNames. */
+  def dbVals(key: Key): String = {
+    ",?" * key.parts.length
+  }
+
+  /**
+   * Returns the ACL ids the given user may read: the well-known readable
+   * ACLs plus every ACL granting 'read' to a group the user belongs to.
+   */
+  def getAcls(sqlConnection: Connection, user: UserInfo): Seq[String] = {
+    val acls = ACL.wellKnowReadableACLids ++ SqlStatement.queryResult(
+      sqlConnection,
+      """
+      SELECT DISTINCT acls.aclid AS aclid
+      FROM acls, membership
+      WHERE membership.userid = ? AND membership.groupid = acls.groupid AND acltype = 'read'
+      """,
+      Seq(user.userId)
+    ) { rs => rs.getString("aclid") }
+    acls
+  }
+
+}
diff --git a/core/src/main/scala/com/ibm/csync/database/Database.scala b/core/src/main/scala/com/ibm/csync/database/Database.scala
new file mode 100644
index 0000000..bc987cc
--- /dev/null
+++ b/core/src/main/scala/com/ibm/csync/database/Database.scala
@@ -0,0 +1,219 @@
+/*
+ * Copyright IBM Corporation 2016-2017
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ */ + +package com.ibm.csync.database + +import java.sql.SQLException +import javax.sql.DataSource + +import com.ibm.csync.Utils._ +import com.typesafe.scalalogging.LazyLogging + +case class Table(name: String, fields: Seq[Field] = Seq(), indexes: Seq[Index] = Seq()) { + + def fields(f: Field, fs: Field*): Table = this.copy(fields = fields ++ Seq(f) ++ fs.toSeq) + + def fields(fs: Traversable[Field]): Table = this.copy(fields = fields ++ fs.toSeq) + + def indexes(i: Index, is: Index*): Table = this.copy(indexes = indexes ++ Seq(i) ++ is.toSeq) + + def index(col: String, rest: String*): Table = { + val cols = Seq(col) ++ rest + val index = Index( + name, + (Seq(name) ++ cols).mkString("_"), + false, + cols: _* + ) + this.copy(indexes = indexes ++ Seq(index)) + } + + def getSQL: Seq[String] = Seq(s"create table $name ()") ++ + (fields map { f => s"alter table $name add column ${f.asString}" }) ++ + (indexes map { _.asString }) + + def unique(cols: String*): Table = { + val index = Index(name, (Seq(name) ++ cols.toSeq).mkString("_"), true, cols: _*) + this.copy(indexes = indexes ++ Seq(index)) + } + +} + +trait Step { + val asString: String +} + +case class Field(name: String, typ: String, + isNull: Boolean = true, + limit: Option[Long] = None, + isPrimary: Boolean = false, + isUnique: Boolean = false, + extra: String = "") extends Step { + lazy val asString: String = "%s %s %s %s %s %s %s".format( + name, + typ, + limit match { + case Some(x) => "(%d)".format(x) + case None => "" + }, + if (isNull) " NULL " else " NOT NULL ", + if (isPrimary) " PRIMARY KEY " else "", + if (isUnique) "UNIQUE" else "", + extra + ) + + def primary: Field = this.copy(isPrimary = true) + + def length(x: Long): Field = this.copy(limit = Some(x)) + + def notNull: Field = this.copy(isNull = false) + + def maybeNull: Field = this.copy(isNull = true) + + def unique: Field = this.copy(isUnique = true) + + def extra(txt: String): Field = this.copy(extra = extra + " " + txt) +} + +case class 
Index(table: String, name: String, isUnique: Boolean, cols: String*) extends Step { + lazy val asString: String = "create %s index %s on %s (%s)".format( + if (isUnique) "UNIQUE" else "", + name, + table, + cols.mkString(",") + ) + +} + +object Database extends LazyLogging { + + private def bigint(name: String) = Field(name, "bigint") + + private def varchar(name: String) = Field(name, "varchar") + + private def serial(name: String) = Field(name, "serial", isNull = false) + + private def boolean(name: String) = Field(name, "boolean") + + private def addKeys(t: Table, i: Int): Table = if (i < 0) t else { + val colName = s"key$i" + addKeys( + t.fields(varchar(colName).maybeNull).index(colName), + i - 1 + ) + } + + val KEY_COMPONENTS = 16 + + def getSQL: Seq[String] = Seq( + addKeys( + Table("latest").fields( + serial("vts").unique, + bigint("cts").notNull, + varchar("key").notNull.unique, + varchar("aclid").notNull, + boolean("isDeleted").notNull, + varchar("creatorId").notNull, + varchar("data") + ), + KEY_COMPONENTS - 1 + ), + addKeys( + Table("attic").fields( + serial("vts").unique, + varchar("key").notNull, + varchar("aclid").notNull, + varchar("creatorId").notNull + ), + KEY_COMPONENTS - 1 + ).unique("key", "aclid", "creatorId").index("key"), + + Table("users").fields( + varchar("userId").primary, + varchar("authenticatorId").notNull + ).unique("userId", "authenticatorId").index("userId").index("authenticatorId"), + + Table("groups").fields( + varchar("groupId").primary, + varchar("groupName") + ), + + Table("membership").fields( + varchar("userId").notNull.extra("references users ON DELETE CASCADE"), + varchar("groupId").notNull.extra("references groups ON DELETE CASCADE") + ).unique("userId", "groupId"), + + Table("acls").fields( + varchar("aclId").notNull, + varchar("aclType").notNull, + varchar("groupId").notNull.extra("references groups ON DELETE CASCADE") + ).unique("aclId", "aclType", "groupId") + ) flatMap (_.getSQL) + + private def 
handleSQLException(ex: SQLException): Unit = { + ex.getSQLState match { + case "42P07" => + logger.debug("relation already exists") + case "42701" => + logger.debug("column already exists") + case x => + logger.debug(s"SQLState = $x") + throw ex + } + + } + + def createTables(ds: DataSource): String = { + using(ds.getConnection()) { c => + c.setAutoCommit(true) + using(c.createStatement()) { s => + for (sql <- Database.getSQL) { + logger.debug(s"$sql") + try { + val x = s.execute(sql) + logger.debug(s"$x") + logger.debug(s"${s.getUpdateCount}") + } catch { + case ex: SQLException => handleSQLException(ex) + } + } + } + } + + using(ds.getConnection()) { c2 => + c2.setAutoCommit(true) + using(c2.createStatement()) { s => + val newUUID = java.util.UUID.randomUUID().toString + val n = s.executeUpdate( + s""" + INSERT INTO latest (cts,key,aclid,isDeleted,creatorId,data,key0,key1,key2) + VALUES (0,'sys.info.uuid','$$PublicRead',false,'$$publicUser','$newUUID','sys','info','uuid') + ON CONFLICT DO NOTHING + """ + ) + if (n == 1) { + newUUID + } else { + using(s.executeQuery("SELECT data from latest where key = 'sys.info.uuid'")) { rs => + rs.next() + rs.getString("data") + } + } + } + } + } + +} diff --git a/core/src/main/scala/com/ibm/csync/database/SqlStatement.scala b/core/src/main/scala/com/ibm/csync/database/SqlStatement.scala new file mode 100644 index 0000000..937dc3d --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/database/SqlStatement.scala @@ -0,0 +1,74 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.database + +import java.sql.{Connection, PreparedStatement, ResultSet} + +import com.ibm.csync.Utils.using +import com.ibm.csync.types.VTS +import com.typesafe.scalalogging.LazyLogging + +import scala.collection.mutable + +object SqlStatement extends LazyLogging { + def prep[T](c: Connection, sql: String, args: Seq[Any])(f: PreparedStatement => T): T = { + logger.debug(sql) + using(c.prepareStatement(sql)) { s => + var i = 1 + for (a <- args) { + s.setObject(i, a) + logger.debug(s".... ${String.valueOf(a)}") + i += 1 + } + f(s) + } + } + + def runQuery[T](c: Connection, sql: String, args: Seq[Any])(f: ResultSet => T): T = prep(c, sql, args) { s => + using(s.executeQuery()) { rs => + f(rs) + } + } + + def queryResult[T](c: Connection, sql: String, args: Seq[Any])(f: ResultSet => T): Seq[T] = + prep(c, sql, args) { s => + using(s.executeQuery()) { rs => + val b = mutable.Buffer[T]() + while (rs.next) { + b.append(f(rs)) + } + b.asInstanceOf[Seq[T]] + } + } + + def runUpdate[T](c: Connection, sql: String, args: Seq[Any])(f: Int => T): T = prep(c, sql, args) { s => + val res = s.executeUpdate() + logger.debug(s".... returned $res") + f(res) + } + + def runUpdate1(c: Connection, sql: String, args: Seq[Any]): Unit = runUpdate(c, sql, args) { count => + assert(count == 1) + } + + def updateGetVts(c: Connection, sql: String, args: Seq[Any]): VTS = runQuery(c, sql, args) { rs => + rs.next + val vts = rs.getLong(1) + logger.debug(s".... 
returned $vts") + VTS(vts) + } +} diff --git a/core/src/main/scala/com/ibm/csync/rabbitmq/Constants.scala b/core/src/main/scala/com/ibm/csync/rabbitmq/Constants.scala new file mode 100644 index 0000000..e805ae2 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/rabbitmq/Constants.scala @@ -0,0 +1,25 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.rabbitmq + +import com.rabbitmq.client.AMQP.BasicProperties + +object Constants { + val basicProperties = new BasicProperties() + val PORT = 15672 + val HEARTBEAT = 10 +} diff --git a/core/src/main/scala/com/ibm/csync/rabbitmq/Exchange.scala b/core/src/main/scala/com/ibm/csync/rabbitmq/Exchange.scala new file mode 100644 index 0000000..35582e2 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/rabbitmq/Exchange.scala @@ -0,0 +1,41 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.rabbitmq + +import com.rabbitmq.client.AMQP +import com.rabbitmq.client.Channel +import com.typesafe.scalalogging.LazyLogging + +case class Exchange(info: ExchangeInfo, ch: Channel) extends LazyLogging { + + import Constants._ + + def publish(key: RoutingKey, data: String): Unit = + ch.basicPublish(info.name, key.asString, basicProperties, data.getBytes()) + + def bindTo(to: Exchange, routingKey: RoutingKey): AMQP.Exchange.BindOk = + ch.exchangeBind(to.info.name, info.name, routingKey.asString) + + def unbindTo(to: Exchange, routingKey: RoutingKey): AMQP.Exchange.UnbindOk = + ch.exchangeUnbind(to.info.name, info.name, routingKey.asString) + + def bindTo(to: Queue, routingKey: RoutingKey): AMQP.Queue.BindOk = + ch.queueBind(to.info.name, info.name, routingKey.asString) + + def unbindTo(to: Queue, routingKey: RoutingKey): AMQP.Queue.UnbindOk = + ch.queueUnbind(to.info.name, info.name, routingKey.asString) +} diff --git a/core/src/main/scala/com/ibm/csync/rabbitmq/ExchangeInfo.scala b/core/src/main/scala/com/ibm/csync/rabbitmq/ExchangeInfo.scala new file mode 100644 index 0000000..c7da517 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/rabbitmq/ExchangeInfo.scala @@ -0,0 +1,41 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.rabbitmq + +import com.rabbitmq.client.Channel +import com.typesafe.scalalogging.LazyLogging + +case class ExchangeInfo(id: String, typ: String = "topic", durable: Boolean = false, + autoDelete: Boolean = false, + args: Map[String, Object] = Map()) extends LazyLogging { + def id(x: String): ExchangeInfo = this.copy(id = x) + def typ(x: String): ExchangeInfo = this.copy(typ = x) + def durable(x: Boolean): ExchangeInfo = this.copy(durable = x) + def autoDelete(x: Boolean): ExchangeInfo = this.copy(autoDelete = x) + def args(x: Map[String, Object]): ExchangeInfo = this.copy(args = x) + def arg(k: String, v: Object): ExchangeInfo = this.copy(args = args + (k -> v)) + + lazy val name = "x3-" + id + + def declare(ch: Channel): Exchange = { + import collection.JavaConverters._ + + ch.exchangeDeclare(name, typ, durable, autoDelete, args.asJava) + Exchange(this, ch) + + } +} diff --git a/core/src/main/scala/com/ibm/csync/rabbitmq/Factory.scala b/core/src/main/scala/com/ibm/csync/rabbitmq/Factory.scala new file mode 100644 index 0000000..80c6565 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/rabbitmq/Factory.scala @@ -0,0 +1,68 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.rabbitmq + +import com.rabbitmq.client.{Connection, ConnectionFactory} +import org.slf4j.LoggerFactory + +import scalaj.http.Http + +case class Factory( + uri: String +) { + + lazy val connFactory: ConnectionFactory = { + val t = new ConnectionFactory + t.setUri(uri) + t + } + + def newConnection: Connection = { + val perms = + """ + { + "configure" : ".*", + "read" : ".*", + "write" : ".*" + } + """ + + val url = s"http://${connFactory.getHost}:" + Constants.PORT + + try { + Http(s"$url/api/vhosts/${connFactory.getVirtualHost}") + .header("content-type", "application/json") + .auth(connFactory.getUsername, connFactory.getPassword).put("") + .asString + .throwError + + Http(s"$url/api/permissions/${connFactory.getVirtualHost}/${connFactory.getUsername}") + .header("content-type", "application/json") + .auth(connFactory.getUsername, connFactory.getPassword) + .put(perms) + .asString + .throwError + } catch { + + case _ => { + lazy val logger = LoggerFactory.getLogger(getClass) + logger.debug("RabbitMQ: Unable to create virtualHost and set permissions.") + } + } + connFactory.newConnection + } +} diff --git a/core/src/main/scala/com/ibm/csync/rabbitmq/Queue.scala b/core/src/main/scala/com/ibm/csync/rabbitmq/Queue.scala new file mode 100644 index 0000000..b53c21a --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/rabbitmq/Queue.scala @@ -0,0 +1,37 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.rabbitmq + +import com.rabbitmq.client.AMQP +import com.rabbitmq.client.Channel +import com.typesafe.scalalogging.LazyLogging + +import scala.concurrent.{ExecutionContext, Future} + +case class Queue(info: QueueInfo, ch: Channel) extends LazyLogging { + + def remove(implicit ec: ExecutionContext): Future[AMQP.Queue.DeleteOk] = Future { + logger.debug(s"[Queue.remove]: Deleting queue ${info.name} with properties $this") + ch.queueDelete(info.name) + } + + def publish(data: String)(implicit ec: ExecutionContext): Future[_] = Future { + logger.debug(s"[Queue.publish]: publishing to queue ${info.name}"); + ch.basicPublish("", info.name, Constants.basicProperties, data.getBytes()) + } + +} diff --git a/core/src/main/scala/com/ibm/csync/rabbitmq/QueueInfo.scala b/core/src/main/scala/com/ibm/csync/rabbitmq/QueueInfo.scala new file mode 100644 index 0000000..10db68b --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/rabbitmq/QueueInfo.scala @@ -0,0 +1,48 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.rabbitmq + +import com.rabbitmq.client.Channel +import com.typesafe.scalalogging.LazyLogging + +case class QueueInfo( + id: String = null, + durable: Boolean = false, + exclusive: Boolean = false, + autoDelete: Boolean = false, + args: Map[String, Object] = Map() +) extends LazyLogging { + def id(x: String): QueueInfo = this.copy(id = x) + def durable(x: Boolean): QueueInfo = this.copy(durable = x) + def exclusive(x: Boolean): QueueInfo = this.copy(exclusive = x) + def autoDelete(x: Boolean): QueueInfo = this.copy(autoDelete = x) + def args(x: Map[String, Object]): QueueInfo = this.copy(args = x) + def arg(k: String, v: Object): QueueInfo = this.copy(args = args + (k -> v)) + def queueTTL(t: Int): QueueInfo = this.arg("x-expires", new Integer(t)) + def messageTTL(t: Int): QueueInfo = this.arg("x-message-ttl", new Integer(t)) + + lazy val name: String = "q3-" + id + + def declare(ch: Channel): Queue = { + import collection.JavaConverters._ + + logger.debug(s"[QueueInfo.declare]: Declaring queue $name with properties $this") + + ch.queueDeclare(name, durable, exclusive, autoDelete, args.asJava) + Queue(this, ch) + } +} diff --git a/core/src/main/scala/com/ibm/csync/rabbitmq/RoutingKey.scala b/core/src/main/scala/com/ibm/csync/rabbitmq/RoutingKey.scala new file mode 100644 index 0000000..ac76112 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/rabbitmq/RoutingKey.scala @@ -0,0 +1,23 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.rabbitmq + +case class RoutingKey(parts: String*) { + lazy val asString: String = parts.mkString(".") + + override def toString: String = asString +} diff --git a/core/src/main/scala/com/ibm/csync/session/Constants.scala b/core/src/main/scala/com/ibm/csync/session/Constants.scala new file mode 100644 index 0000000..36c8f0d --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/session/Constants.scala @@ -0,0 +1,24 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.session + +object Constants { + + val MESSAGE_TTL = 60000 + val QUEUE_TTL = 30000 + +} diff --git a/core/src/main/scala/com/ibm/csync/session/Session.scala b/core/src/main/scala/com/ibm/csync/session/Session.scala new file mode 100644 index 0000000..bada58f --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/session/Session.scala @@ -0,0 +1,142 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.session + +import java.nio.ByteBuffer +import java.sql.{Connection => SqlConnection} +import javax.sql.DataSource + +import com.ibm.csync.auth.demo.ValidateDemoToken +import com.ibm.csync.auth.github.ValidateGitHubToken +import com.ibm.csync.auth.google.ValidateGoogleToken +import com.ibm.csync.commands.{ConnectResponse, Data, Err, Response} +import com.ibm.csync.database.SqlStatement +import com.ibm.csync.rabbitmq.{ExchangeInfo, QueueInfo, RoutingKey} +import com.ibm.csync.types.{Pattern, SessionId, Token} +import com.rabbitmq.client.AMQP.Queue.{BindOk, UnbindOk} +import com.rabbitmq.client.{AMQP, DefaultConsumer, Envelope, Connection => RabbitConnection} +import com.typesafe.scalalogging.LazyLogging + +import scala.concurrent.Future +import scala.util.matching.Regex + +object Session { + val DemoAuthProvider = "demo" + val GoogleAuthProvider = "google" + val GithubAuthProvider = "github" + + val demoToken: String = """demoToken""" + val userToken: Regex = """demoToken\((.*)\)""".r + + val masterExchangeInfo = ExchangeInfo("master") + def userExchangeInfo(userInfo: UserInfo): ExchangeInfo = masterExchangeInfo.id(userInfo.userId) + def sessionQueueInfo(sessionId: SessionId): QueueInfo = QueueInfo(id = sessionId.id) + .messageTTL(Constants.MESSAGE_TTL).queueTTL(Constants.QUEUE_TTL) +} + +case class Session(ds: DataSource, uuid: String, + connection: RabbitConnection, + authProvider: Option[String], + token: Token, + sessionId: SessionId)(outgoing: Response => Future[_]) extends LazyLogging { session => + + import 
Session._ + + logger.info(s"session constructor $sessionId using $token") + + private val ch = connection.createChannel() + + val userInfo: UserInfo = try { + authProvider match { + case Some(GoogleAuthProvider) => ValidateGoogleToken.validate(token.s).get + case Some(GithubAuthProvider) => ValidateGitHubToken.validate(token.s) + case Some(DemoAuthProvider) | None => ValidateDemoToken.validate(token.s) + case Some(unknownProvider) => + logger.info(s"[validateToken]: Unknown provider ${'\"'}$unknownProvider${'\"'}") + throw new Exception("Cannot establish session. Token validation failed - unknown provider") + } + } catch { + case ex: Exception => + outgoing(Err(msg = ex.getMessage, cause = None)) + throw ex + } + + private val canRead = transaction { sqlConnection => + Seq("$world", userInfo.userId) ++ SqlStatement.queryResult( + sqlConnection, + "select groupid from membership where userid = ?", Seq(userInfo.userId) + ) { rs => rs.getString(1) } + } + + private val mx = masterExchangeInfo.declare(ch) + private val ux = userExchangeInfo(userInfo).declare(ch) + + canRead foreach { g => + mx.bindTo(ux, RoutingKey(g, "#")) + } + + private val uq = sessionQueueInfo(sessionId).declare(ch) + + private val tag = ch.basicConsume(uq.info.name, true, new DefaultConsumer(ch) { + override def handleDelivery(consumerTag: String, envelope: Envelope, properties: AMQP.BasicProperties, + body: Array[Byte]): Unit = { + import boopickle.Default._ + + val res = Unpickle[Data].fromBytes(ByteBuffer.wrap(body)) + outgoing(res) + } + }) + + outgoing(ConnectResponse(uuid = uuid, uid = userInfo.userId, expires = userInfo.expires)) + + def subscribe(pattern: Pattern): BindOk = + ch.queueBind(uq.info.name, ux.info.name, "*." + pattern.asString) + + def unsubscribe(pattern: Pattern): UnbindOk = + ch.queueUnbind(uq.info.name, ux.info.name, "*." + pattern.asString) + + def send(group: String, key: Seq[String], data: Array[Byte]): Unit = { + val rk = group + "." 
+ key.mkString(".") + ch.basicPublish(mx.info.name, rk, com.ibm.csync.rabbitmq.Constants.basicProperties, data) + } + + def close(): Unit = { + try { + logger.debug(s"closing session $sessionId, tag $tag") + ch.close() + } catch { + case e: Throwable => logger.error(s"cancel for session $sessionId", e) + } + } + + def transaction[T](f: SqlConnection => T): T = { + val c = ds.getConnection + try { + c.setAutoCommit(false) + val x = f(c) + c.commit() + x + } catch { + case e: Throwable => + c.rollback() + throw e + } finally { + c.close() + } + } + +} diff --git a/core/src/main/scala/com/ibm/csync/session/UserInfo.scala b/core/src/main/scala/com/ibm/csync/session/UserInfo.scala new file mode 100644 index 0000000..27d11fd --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/session/UserInfo.scala @@ -0,0 +1,27 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.session + +object UserInfo { + def defaultExpires(): Long = { + System.currentTimeMillis() / 1000 + 60 * 60 // one hour from now + } +} + +case class UserInfo(userId: String, expires: Long = UserInfo.defaultExpires()) { + +} diff --git a/core/src/main/scala/com/ibm/csync/types/ACL.scala b/core/src/main/scala/com/ibm/csync/types/ACL.scala new file mode 100644 index 0000000..66bfe07 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/types/ACL.scala @@ -0,0 +1,186 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.types + +import java.sql.Connection + +import com.ibm.csync.database.SqlStatement +import com.ibm.csync.session.UserInfo +import com.ibm.csync.types.ResponseCode.{ + CreatePermissionDenied, + DeletePermissionDenied, + ReadPermissionDenied, + UpdatePermissionDenied +} + +sealed trait ACL { + val id: String + + def getReadGroups(connection: Connection): Seq[String] + def checkRead(sqlConnection: Connection, user: UserInfo): Unit + def checkUpdate(sqlConnection: Connection, user: UserInfo): Unit + def checkDelete(sqlConnection: Connection, user: UserInfo): Unit + def checkCreate(sqlConnection: Connection, user: UserInfo): Unit +} + +object ACL { + + trait Helper extends ACL { + val creatorId: CreatorId + override def getReadGroups(connection: Connection): Seq[String] = Seq(creatorId.id, "$publicUser") + override def checkRead(sqlConnection: Connection, user: UserInfo) { + if ((user.userId != creatorId.id) && (user.userId != "$publicUser")) { + ReadPermissionDenied.throwIt() + } + } + override def checkUpdate(sqlConnection: Connection, user: UserInfo) { + if ((user.userId != creatorId.id) && (user.userId != "$publicUser")) { + UpdatePermissionDenied.throwIt() + } + } + override def checkCreate(sqlConnection: Connection, user: UserInfo) { + if ((user.userId != creatorId.id) && (user.userId != "$publicUser")) { + CreatePermissionDenied.throwIt() + } + } + override def checkDelete(sqlConnection: Connection, user: UserInfo) { + if ((user.userId != creatorId.id) && (user.userId != "$publicUser")) { + DeletePermissionDenied.throwIt() + } + } + } + + case class Private(creatorId: CreatorId) extends Helper { + val id = "$private" + } + + case class PublicRead(creatorId: CreatorId) extends Helper { + val id = "$publicRead" + override def getReadGroups(connection: Connection): Seq[String] = Seq("$world") + override def checkRead(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + } + + case class PublicWrite(creatorId: CreatorId) extends 
Helper { + val id = "$publicWrite" + override def checkUpdate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkDelete(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + } + + case class PublicCreate(creatorId: CreatorId) extends Helper { + val id = "$publicCreate" + override def checkCreate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + } + + case class PublicReadWrite(creatorId: CreatorId) extends Helper { + val id = "$publicReadWrite" + override def getReadGroups(connection: Connection): Seq[String] = Seq("$world") + override def checkRead(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkUpdate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkDelete(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + } + + case class PublicReadCreate(creatorId: CreatorId) extends Helper { + val id = "$publicReadCreate" + override def getReadGroups(connection: Connection): Seq[String] = Seq("$world") + override def checkRead(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkCreate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + } + + case class PublicWriteCreate(creatorId: CreatorId) extends Helper { + val id = "$publicWriteCreate" + override def checkUpdate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkDelete(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkCreate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + } + + case class PublicReadWriteCreate(creatorId: CreatorId) extends Helper { + val id = "$publicReadWriteCreate" + override def getReadGroups(connection: Connection): Seq[String] = Seq("$world") + override def checkRead(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkUpdate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + override def checkDelete(sqlConnection: Connection, user: UserInfo) { /* 
Allow */ } + override def checkCreate(sqlConnection: Connection, user: UserInfo) { /* Allow */ } + } + + case class General(id: String, creator: CreatorId) extends ACL { + + override def getReadGroups(sqlConnection: Connection): Seq[String] = { + if (id == "$private") { + Seq(creator.id) + } else if (id.startsWith("$public")) { + if (id.contains("Read")) Seq("$world") else Seq(creator.id) + } else { + SqlStatement.queryResult( + sqlConnection, + "SELECT groupid from acls where aclid = ? and acltype = 'read'", Seq(id) + ) { rs => rs.getString("groupid") } + } + } + + private def can(sqlConnection: Connection, responseCode: ResponseCode, kind: String, userInfo: UserInfo): Unit = { + SqlStatement.runQuery( + sqlConnection, + """ + SELECT count(*) from acls join membership + ON (acls.groupId = membership.groupId) + WHERE aclId = ? and aclType = ? and userId = ? + """, + Seq(id, kind, userInfo.userId) + ) { rs => + rs.next + val count = rs.getInt(1) + if (count == 0) responseCode.throwIt() + } + } + + override def checkRead(sqlConnection: Connection, user: UserInfo): Unit = + can(sqlConnection, ReadPermissionDenied, "read", user) + + override def checkUpdate(sqlConnection: Connection, user: UserInfo): Unit = + can(sqlConnection, UpdatePermissionDenied, "update", user) + + override def checkCreate(sqlConnection: Connection, user: UserInfo): Unit = + can(sqlConnection, CreatePermissionDenied, "create", user) + + override def checkDelete(sqlConnection: Connection, user: UserInfo): Unit = + can(sqlConnection, DeletePermissionDenied, "delete", user) + } + + def wellKnowReadableACLids: Seq[String] = { + Seq("$publicRead", "$publicReadWrite", "$publicReadCreate", "$publicReadWriteCreate") + } + + def builtin(id: String, creator: CreatorId): ACL = id match { + case "$private" => Private(creator) + case "$publicRead" => PublicRead(creator) + case "$publicWrite" => PublicWrite(creator) + case "$publicCreate" => PublicCreate(creator) + case "$publicReadCreate" => 
PublicReadCreate(creator) + case "$publicReadWrite" => PublicReadWrite(creator) + case "$publicWriteCreate" => PublicWriteCreate(creator) + case "$publicReadWriteCreate" => PublicReadWriteCreate(creator) + case _ => throw new IllegalArgumentException(id) + } + + def apply(id: String, creator: CreatorId): ACL = + if (id.startsWith("$")) { + builtin(id, creator) + } else { + General(id, creator) + } + +} diff --git a/core/src/main/scala/com/ibm/csync/types/CTS.scala b/core/src/main/scala/com/ibm/csync/types/CTS.scala new file mode 100644 index 0000000..379a232 --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/types/CTS.scala @@ -0,0 +1,21 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.types + +case class CTS(cts: Long) extends AnyVal { + +} diff --git a/core/src/main/scala/com/ibm/csync/types/ClientError.scala b/core/src/main/scala/com/ibm/csync/types/ClientError.scala new file mode 100644 index 0000000..077e2dc --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/types/ClientError.scala @@ -0,0 +1,22 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.types + +case class ClientError(code: ResponseCode, msg: Option[String]) extends Exception { + override def toString: String = s"ClientError($code,$msg)" + +} diff --git a/core/src/main/scala/com/ibm/csync/types/CreatorId.scala b/core/src/main/scala/com/ibm/csync/types/CreatorId.scala new file mode 100644 index 0000000..3f6a51b --- /dev/null +++ b/core/src/main/scala/com/ibm/csync/types/CreatorId.scala @@ -0,0 +1,21 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/** Identity of the user that created a node. */
case class CreatorId(id: String) extends AnyVal
/*
 * A concrete (wildcard-free) key in the hierarchy. Instances are only created
 * through the companion's apply methods, which validate the parts.
 */
class Key private (val parts: Seq[Identifier]) {
  /** Dotted rendering, e.g. Key("a","b") -> "a.b". */
  lazy val asString: String = parts.map { _.asString }.mkString(".")
  lazy val asStrings: Seq[String] = parts.map { _.asString }

  // Key with the last component removed; the parent of a root key is the empty key.
  lazy val parent = new Key(parts.take(parts.length - 1))

  override def toString: String = s"Key($asString)"
  // Equality/hashing are delegated to the dotted string form.
  override def hashCode: Int = asString.hashCode
  override def equals(other: Any): Boolean = other match {
    case o: Key => asString == o.asString
    case _ => false
  }
}

object Key {

  /**
   * Builds a Key from its string components.
   *
   * Limits are shared with [[Pattern]] (at most Pattern.MAX_PARTS components,
   * at most Pattern.MAX_LENGTH rendered characters including separators).
   *
   * @throws ClientError with InvalidPathFormat when the parts are empty, too
   *         many, too long, malformed, or contain a wildcard ('*' or '#').
   */
  def apply(parts: Seq[String]): Key = {
    val len = parts.length
    // Rendered length including the '.' separators between parts.
    val totalLen = parts.map { _.length }.sum + (len - 1)
    if (len == 0) {
      ResponseCode.InvalidPathFormat.throwIt("no parts")
    }
    if (len > Pattern.MAX_PARTS) {
      ResponseCode.InvalidPathFormat.throwIt(s"too many parts $len in ${parts.mkString(".")}")
    }
    if (totalLen > Pattern.MAX_LENGTH) {
      ResponseCode.InvalidPathFormat.throwIt(s"too long $totalLen ${parts.mkString(".")}")
    }
    val partsSeq = parts map { Part(_) }
    // A concrete key may not contain wildcards. The previous implementation
    // silently dropped '*' and '#' parts (collect over Identifier), which
    // would turn e.g. "a.*" into the key "a"; reject such input instead.
    new Key(partsSeq.map {
      case id @ Identifier(_) => id
      case other => ResponseCode.InvalidPathFormat.throwIt(
        s"wildcard '${other.asString}' not allowed in key ${parts.mkString(".")}")
    })
  }

  def apply(part: String, parts: String*): Key = apply(part +: parts)

}
/* Parts of a path */
sealed trait Part {
  /** Rendered form of this part ("a", "*", "#"). */
  val asString: String

  // '#' may only appear as the final part of a pattern.
  val mustBeLast = false
  val isBroken = false
}

object Part {
  // An identifier starts with an alphanumeric and continues with
  // alphanumerics, '-' or '_'.
  val idRegEx: Regex = """(^[a-zA-Z0-9][a-zA-Z0-9\-_]*$)""".r

  /**
   * Parses a single path component.
   *
   * @throws ClientError with InvalidPathFormat for anything that is not a
   *         valid identifier, "*" or "#".
   */
  def apply(s: String): Part = s match {
    case idRegEx(id) => Identifier(id)
    case "*" => Star
    case "#" => Pound
    case _ => ResponseCode.InvalidPathFormat.throwIt(s) // was `case x =>` with x unused
  }

}

/* '#': matches this level and everything below; must be the last part */
case object Pound extends Part {
  val asString = "#"
  override val mustBeLast = true
}

/* '*': matches exactly one component at this level */
case object Star extends Part {
  val asString = "*"
}

/* Ordinary, literal path component */
case class Identifier(asString: String) extends Part
/* Key with wildcards */
class Pattern(parts: Seq[Part]) {
  /** Dotted rendering, e.g. "a.*.#". */
  lazy val asString: String = (parts map { _.asString }).mkString(".")
  lazy val asStrings: Seq[String] = parts.map { _.asString }
  override def toString: String = asString

  /**
   * Renders this pattern as a SQL WHERE fragment over columns key0..keyN,
   * paired with the bind values for the '?' placeholders.
   *
   * - Identifier parts become "keyI = ?" with the identifier as bind value.
   * - A trailing '*' requires that component to be present ("is not null").
   * - Without a '#', the key must end exactly at the pattern's length, so the
   *   next column is constrained to null (unless the pattern already uses all
   *   MAX_PARTS columns).
   */
  lazy val asWhere: (String, Seq[String]) = {
    val terms = mutable.Buffer[String]()
    val vals = mutable.Buffer[String]()

    var i = 0
    var lastIsStar = false // true while the most recently seen part is '*'
    var isStrict = true // cleared by '#': longer keys may match too
    var lastStarIndex = -1
    val n = parts.length
    for (p <- parts) {
      p match {
        case Star =>
          lastIsStar = true
          lastStarIndex = i
        case Pound =>
          isStrict = false
        case Identifier(id) =>
          lastIsStar = false
          terms.append(s"key$i = ?")
          vals.append(id)
      }
      i += 1
    }

    if (lastIsStar) {
      terms.append(s"key$lastStarIndex is not null")
    }

    if (isStrict) {
      if (n < Pattern.MAX_PARTS) {
        terms.append(s"key$n is null")
      }
    }

    // Convert the mutable buffer to an immutable list. The previous
    // `vals.asInstanceOf[Seq[String]]` cast compiles under 2.12 (where
    // scala.Seq is collection.Seq) but fails at runtime under 2.13, where
    // scala.Seq is immutable.Seq and a mutable.Buffer is not one.
    (terms.mkString(" AND "), vals.toList)
  }
}

object Pattern {

  val MAX_LENGTH = 200
  val MAX_PARTS = 16

  // NOTE: the type parameter T is unused; it is kept for source compatibility
  // with any callers that supply it explicitly.
  def apply[T](parts: Seq[String]): Pattern = {
    val len = parts.length
    // Rendered length including the '.' separators between parts.
    val totalLen = parts.map { _.length }.sum + (len - 1)
    if (len == 0) {
      throw new IllegalArgumentException("no parts")
    }
    if (len > MAX_PARTS) {
      throw new IllegalArgumentException(s"too many parts $len")
    }
    if (totalLen > MAX_LENGTH) {
      throw new IllegalArgumentException(s"too long $totalLen")
    }

    val partsSeq = parts map { Part(_) }
    new Pattern(partsSeq)
  }

}
/**
 * Codes reported to clients in responses and errors. Each code carries a
 * stable numeric id (part of the wire protocol -- do not renumber) and a name
 * derived from the declaring object's name via sourcecode.
 */
sealed trait ResponseCode {
  val id: Int
  val name: String

  def throwIt(): Unit
}

object ResponseCode {

  class Value(val id: Int)(implicit nm: sourcecode.Name) extends ResponseCode {
    val name: String = nm.value

    /** Raise this code as a ClientError without a detail message. */
    def throwIt(): Nothing = throw ClientError(this, None)
    /** Raise this code as a ClientError with a detail message. */
    def throwIt(msg: String): Nothing = throw ClientError(this, Some(msg))
  }

  // The ids are protocol constants; suppress the magic-number check for this
  // block instead of obfuscating the literals with arithmetic like `5 * 1`.
  // scalastyle:off magic.number
  case object OK extends Value(0)
  case object NotAuthorizedToPub extends Value(1)
  case object InvalidPathFormat extends Value(2)
  case object CreatePermissionDenied extends Value(3)
  case object ReadPermissionDenied extends Value(4)
  case object UpdatePermissionDenied extends Value(5)
  case object DeletePermissionDenied extends Value(6)
  case object SetAclPermissionDenied extends Value(7)
  case object CannotDeleteNonExistingPath extends Value(8)
  case object PubCtsCheckFailed extends Value(9)
  case object UserAlreadyExists extends Value(10)
  case object UserDoesNotExist extends Value(11)
  case object GroupAlreadyExists extends Value(12)
  case object GroupDoesNotExist extends Value(13)
  case object AclAlreadyExists extends Value(14)
  case object AclDoesNotExist extends Value(15)
  case object UserNotAMember extends Value(16)
  case object GroupNotAMember extends Value(17)
  case object UnknownQueryRequest extends Value(18)
  case object InvalidSchemaJSON extends Value(19)
  case object RelationAlreadyExists extends Value(20)
  case object RelationDoesNotExist extends Value(21)
  case object InvalidDataJSON extends Value(22)
  case object InvalidTableQueryRequest extends Value(23)
  case object InvalidAuthenticatorId extends Value(24)
  case object SillyStyleChecker extends Value(25)
  // scalastyle:on magic.number
}
/** Unique identifier for a client session. */
case class SessionId(id: String) extends AnyVal

object SessionId {
  /** Creates a fresh, random session id backed by a type-4 UUID. */
  def apply(): SessionId = SessionId(UUID.randomUUID().toString)
}
/** Opaque authentication token presented by a client. */
case class Token(s: String) extends AnyVal
/** Verifies that GitHub token validation rejects garbage input. */
class ValidateGitHubTokenTests extends FunSuite with Matchers {
  test("Test bad github token") {
    // An obviously invalid token must be rejected by throwing.
    intercept[Exception] {
      ValidateGitHubToken.validate("This is a bad token")
    }
  }
}
package com.ibm.csync.commands

import com.ibm.csync.database.Database
import com.ibm.csync.rabbitmq.Factory
import com.ibm.csync.session.Session
import com.ibm.csync.types.{SessionId, Token}
import org.postgresql.ds.PGPoolingDataSource
import org.scalatest.{FunSuite, Matchers}
import scala.concurrent.Future

// scalastyle:off magic.number
class AdvanceTests extends FunSuite with Matchers {

  // Builds a throwaway Session backed by a single-connection Postgres pool on
  // localhost, using the session-local "pg_temp" schema, with tables created
  // fresh via Database.createTables, and a local RabbitMQ broker. `f` receives
  // every outgoing Response pushed to the session.
  // NOTE(review): integration test -- assumes Postgres and RabbitMQ are
  // running on localhost (as configured in .travis.yml).
  def fakeSession(f: Response => Future[_]): Session = {

    val ds = new PGPoolingDataSource()
    ds.setCurrentSchema("pg_temp")
    ds.setServerName("localhost")
    ds.setMaxConnections(1)
    Database.createTables(ds)

    val rabbit = Factory("amqp://guest:guest@localhost:5672/testing").newConnection

    Session(ds, "", rabbit, Some("demo"),
      Token("demoToken"), SessionId())(f)
  }

  test("Test Simple Advance") {
    val session = fakeSession { _ => Future.successful(()) }
    try {
      // Five successive writes to the same key "a"; Advance should report only
      // the newest version.
      Pub(99, Seq("a"), Some("x"), false, None, None).doit(session)
      Pub(100, Seq("a"), Some("y"), false, None, None).doit(session)
      Pub(101, Seq("a"), Some("yy"), false, None, None).doit(session)
      Pub(102, Seq("a"), Some("yyy"), false, None, None).doit(session)
      val lastVTS = Pub(103, Seq("a"), Some("yyyy"), false, None, None).doit(session).vts
      val advanceResponse = Advance(1, Seq("a")).doit(session)
      advanceResponse.maxvts should be(lastVTS)
      advanceResponse.vts.head should be(lastVTS)
      advanceResponse.vts.size should be(1)
    } finally {
      session.close()
    }
  }

  test("Test Advance with higher maxVTS then vts") {
    val session = fakeSession { _ => Future.successful(()) }
    try {
      Pub(99, Seq("a"), Some("x"), false, None, None).doit(session)
      Pub(100, Seq("a"), Some("y"), false, None, None).doit(session)
      Pub(101, Seq("a"), Some("yy"), false, None, None).doit(session)
      val lastAVTS = Pub(102, Seq("a"), Some("yyy"), false, None, None).doit(session).vts
      // A later write to an unrelated key "b" pushes the global maxvts past
      // the latest vts of "a".
      val maxVTS = Pub(103, Seq("b"), Some("yyyy"), false, None, None).doit(session).vts
      val advanceResponse = Advance(1, Seq("a")).doit(session)
      advanceResponse.maxvts should be(maxVTS)
      advanceResponse.vts.head should be(lastAVTS)
      advanceResponse.vts.size should be(1)
    } finally {
      session.close()
    }
  }

  test("Test Advance that reaches limit") {
    val session = fakeSession { _ => Future.successful(()) }
    try {
      // Eleven writes to distinct keys; the advance response is capped at 10
      // entries, so the reported maxvts stops at the tenth publish.
      Pub(99, Seq("a"), Some("x"), false, None, None).doit(session)
      Pub(100, Seq("b"), Some("y"), false, None, None).doit(session)
      Pub(101, Seq("c"), Some("yy"), false, None, None).doit(session)
      Pub(102, Seq("d"), Some("yyy"), false, None, None).doit(session)
      Pub(103, Seq("e"), Some("yyyy"), false, None, None).doit(session)
      Pub(104, Seq("f"), Some("yyyyy"), false, None, None).doit(session)
      Pub(105, Seq("g"), Some("yyyyyy"), false, None, None).doit(session)
      Pub(106, Seq("h"), Some("yyyyyyy"), false, None, None).doit(session)
      Pub(107, Seq("j"), Some("yyyyyyyy"), false, None, None).doit(session)
      val tenthPubVTS = Pub(108, Seq("k"), Some("yyyyyyyyy"), false, None, None).doit(session).vts
      Pub(109, Seq("l"), Some("yyyyyyyyyy"), false, None, None).doit(session)
      val advanceResponse = Advance(1, Seq("*")).doit(session)
      //vts starts at 1, so 10 entries should get us to 11
      advanceResponse.maxvts should be(tenthPubVTS)
      //Max advance return is 10
      advanceResponse.vts.size should be(10)
    } finally {
      session.close()
    }
  }
  //TODO Add advance tests for changed ACLs, updates and deletes
  // scalastyle:on magic.number
}
package com.ibm.csync.commands

import com.ibm.csync.database.Database
import com.ibm.csync.rabbitmq.Factory
import com.ibm.csync.session.Session
import com.ibm.csync.types.{SessionId, Token}
import org.postgresql.ds.PGPoolingDataSource
import org.scalatest.{FunSuite, Matchers}
import scala.concurrent.Future

class FetchTests extends FunSuite with Matchers {
  // scalastyle:off magic.number

  // Builds a throwaway Session backed by a single-connection Postgres pool on
  // localhost, using the session-local "pg_temp" schema, with tables created
  // fresh via Database.createTables, and a local RabbitMQ broker. `f` receives
  // every outgoing Response pushed to the session.
  // NOTE(review): integration test -- assumes Postgres and RabbitMQ are
  // running on localhost (as configured in .travis.yml).
  def fakeSession(f: Response => Future[_]): Session = {

    val ds = new PGPoolingDataSource()
    ds.setCurrentSchema("pg_temp")
    ds.setServerName("localhost")
    ds.setMaxConnections(1)
    Database.createTables(ds)

    val rabbit = Factory("amqp://guest:guest@localhost:5672/testing").newConnection

    Session(ds, "", rabbit, Some("demo"),
      Token("demoToken"), SessionId())(f)
  }

  test("Fetch a Single Node") {
    val session = fakeSession { _ => Future.successful(()) }
    try {
      //Setup tests by publishing a key and getting it.
      val pubResponse = Pub(102, Seq("d"), Some("x"), false, None, None).doit(session)

      //Check to be sure fetch is the key we published
      // (vts 2 is the published node's vts -- presumably the first pub on a
      // fresh database lands at vts 2; TODO confirm against Pub/Advance tests)
      val fetchResponse = Fetch(List(2)).doit(session)
      fetchResponse.response.head.vts should be(pubResponse.vts)
      fetchResponse.response.head.cts should be(pubResponse.cts)
      fetchResponse.response.head.data should be(Some("x"))
      fetchResponse.response.head.creator should be("demoUser")
      fetchResponse.response.head.acl should be("$publicCreate")
    } finally {
      session.close()
    }
  }

  test("Fetch a non existant node") {
    val session = fakeSession { _ => Future.successful(()) }
    try {
      // Nothing was published, so fetching vts 2 must return no entries.
      //Check to be sure no fetch exists
      val fetchResponse = Fetch(List(2)).doit(session)
      fetchResponse.response.size should be(0)
    } finally {
      session.close()
    }
  }

  //TODO Add fetch tests for changed ACLs, updates and deletes
  // scalastyle:on magic.number
}
package com.ibm.csync.commands

import com.ibm.csync.database.Database
import com.ibm.csync.rabbitmq.Factory
import com.ibm.csync.session.Session
import com.ibm.csync.types.{Key, SessionId, Token}
import org.postgresql.ds.PGPoolingDataSource
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FunSuite, Matchers}

import scala.collection.mutable
import scala.concurrent.{Future, Promise}

class PubTests extends FunSuite with Matchers with ScalaFutures {
  // scalastyle:off magic.number

  // Builds a throwaway Session backed by a single-connection Postgres pool on
  // localhost, using the session-local "pg_temp" schema, with tables created
  // fresh via Database.createTables, and a local RabbitMQ broker. `f` receives
  // every outgoing Response pushed to the session.
  // NOTE(review): integration test -- assumes Postgres and RabbitMQ are
  // running on localhost (as configured in .travis.yml).
  def fakeSession(f: Response => Future[_]): Session = {

    val ds = new PGPoolingDataSource()
    ds.setCurrentSchema("pg_temp")
    ds.setServerName("localhost")
    ds.setMaxConnections(1)
    Database.createTables(ds)

    val rabbit = Factory("amqp://guest:guest@localhost:5672/testing").newConnection

    Session(ds, "", rabbit, Some("demo"),
      Token("demoToken"), SessionId())(f)
  }

  test("Publish a Parent and its child") {

    // Collect Data notifications pushed back by the session; complete the
    // promise once both expected keys ("a" and "a.b") have arrived.
    val promise = Promise[Map[Key, Data]]()
    val responseData = mutable.Map[Key, Data]()
    val session = fakeSession { outgoing =>
      outgoing match {
        case d: Data =>
          val key = Key(d.path)
          responseData(key) = d
          if (responseData.keySet.size == 2) promise.success(responseData.toMap)
        case _ =>
      }
      Future.successful(())
    }
    try {
      // Subscribe to everything so both pubs are delivered back to us.
      Sub(Seq("#")).doit(session)
      val aPubResponse = Pub(99, Seq("a"), Some("x"), false, None, None).doit(session)
      val bPubResponse = Pub(100, Seq("a", "b"), Some("z"), false, None, None).doit(session)

      //Check pub responses
      aPubResponse.code should be(0)
      aPubResponse.vts should be(2)
      aPubResponse.cts should be(99)

      bPubResponse.code should be(0)
      bPubResponse.vts should be(3)
      bPubResponse.cts should be(100)

      val res = promise.future.futureValue
      res.size should be(2)
      val a = res(Key("a"))
      val b = res(Key("a", "b"))

      // A should have published before b, ensure this happened
      a.vts should be < b.vts
      //Make sure the cts we sent off are correct
      a.cts should be(aPubResponse.cts)
      a.vts should be(aPubResponse.vts)
      a.data should be(Some("x"))
      a.creator should be("demoUser")
      a.acl should be("$publicCreate")
      a.deletePath should be(false)

      b.cts should be(bPubResponse.cts)
      b.vts should be(bPubResponse.vts)
      b.data should be(Some("z"))
      b.creator should be("demoUser")
      b.acl should be("$publicCreate")
      b.deletePath should be(false)
    } finally {
      session.close()
    }
  }

  test("Publish a single node and delete it") {

    // Two-stage collection: `promise` fires on the first Data for key "c",
    // `deletePromise` fires once the delete notification arrives.
    val promise = Promise[Map[Key, Data]]()
    val deletePromise = Promise[Map[Key, Data]]()
    val responseData = mutable.Map[Key, Data]()
    var promiseComplete = false
    val session = fakeSession { outgoing =>
      outgoing match {
        case d: Data =>
          val key = Key(d.path)
          responseData(key) = d
          if (responseData.keySet.size == 1 && !promiseComplete) {
            promiseComplete = true
            promise.success(responseData.toMap)
          }
          if (responseData(Key("c")).deletePath) {
            deletePromise.success(responseData.toMap)
          }
        case _ =>
      }
      Future.successful(())
    }
    try {
      Sub(Seq("#")).doit(session)
      val createPubResponse = Pub(101, Seq("c"), Some("x"), false, None, None).doit(session)

      //Check Pub Response
      createPubResponse.code should be(0)
      createPubResponse.vts should be(2)
      createPubResponse.cts should be(101)

      val res = promise.future.futureValue
      res.size should be(1)
      val key = res(Key("c"))

      //Check the key to be sure it is correct
      key.cts should be(createPubResponse.cts)
      key.vts should be(createPubResponse.vts)
      key.data should be(Some("x"))
      key.creator should be("demoUser")
      key.acl should be("$publicCreate")
      key.deletePath should be(false)

      //delete the key
      val deletePubResponse = Pub(102, Seq("c"), Some("x"), true, None, None).doit(session)

      //Check Pub Response
      deletePubResponse.code should be(0)
      deletePubResponse.vts should be(3)
      deletePubResponse.cts should be(102)

      val deleteRes = deletePromise.future.futureValue
      deleteRes.size should be(1)
      val deletedKey = deleteRes(Key("c"))

      //check the new key
      deletedKey.cts should be(deletePubResponse.cts)
      deletedKey.vts should be(deletePubResponse.vts)
      deletedKey.creator should be("demoUser")
      deletedKey.acl should be("$publicCreate")
      deletedKey.deletePath should be(true)
    } finally {
      session.close()
    }
  }

  test("Publish a single node and update it with a different ACL") {

    // `promise` fires on the initial create; `updatePromise` fires when the
    // subsequent (non-delete) update notification arrives.
    val promise = Promise[Map[Key, Data]]()
    val updatePromise = Promise[Map[Key, Data]]()
    val responseData = mutable.Map[Key, Data]()
    var promiseComplete = false
    val session = fakeSession { outgoing =>
      outgoing match {
        case d: Data =>
          val key = Key(d.path)
          responseData(key) = d
          if (responseData.keySet.size == 1 && !promiseComplete) {
            promiseComplete = true
            promise.success(responseData.toMap)
          } else if (!responseData(key).deletePath) {
            updatePromise.success(responseData.toMap)
          }
        case _ =>
      }
      Future.successful(())
    }
    try {
      Sub(Seq("#")).doit(session)
      val firstPubResponse = Pub(103, Seq("e"), Some("x"), false, None, None).doit(session)

      //Check Pub Response
      firstPubResponse.code should be(0)
      firstPubResponse.vts should be(2)
      firstPubResponse.cts should be(103)

      val res = promise.future.futureValue
      res.size should be(1)
      val key = res(Key("e"))

      //Check the key to be sure it is correct
      key.cts should be(firstPubResponse.cts)
      key.vts should be(firstPubResponse.vts)
      key.data should be(Some("x"))
      key.creator should be("demoUser")
      key.acl should be("$publicCreate")
      key.deletePath should be(false)

      //pub the key again with changed data
      val secondPubResponse = Pub(104, Seq("e"), Some("y"), false, Option("$publicReadWriteCreate"), None).doit(session)

      //Check Pub Response
      secondPubResponse.code should be(0)
      secondPubResponse.vts should be(4) //VTS increases by two due to changed ACL
      secondPubResponse.cts should be(104)

      val updateRes = updatePromise.future.futureValue
      updateRes.size should be(1)
      val updateKey = updateRes(Key("e"))

      //check the new key
      updateKey.cts should be(secondPubResponse.cts)
      updateKey.vts should be(secondPubResponse.vts)
      updateKey.creator should be("demoUser")
      updateKey.acl should be("$publicReadWriteCreate")
      updateKey.deletePath should be(false)
      updateKey.data should be(Some("y"))
    } finally {
      session.close()
    }
  }

  test("Publish a single node and delete it then publish a replacement") {

    // Three-stage collection for key "f": initial create, then the delete
    // notification, then the replacement publish.
    val promise = Promise[Map[Key, Data]]()
    val deletePromise = Promise[Map[Key, Data]]()
    val updatePromise = Promise[Map[Key, Data]]()
    val responseData = mutable.Map[Key, Data]()
    var promiseComplete = false
    val session = fakeSession { outgoing =>
      outgoing match {
        case d: Data =>
          val key = Key(d.path)
          responseData(key) = d
          if (responseData.keySet.size == 1 && !promiseComplete) {
            promiseComplete = true
            promise.success(responseData.toMap)
          } else if (responseData(Key("f")).deletePath) {
            deletePromise.success(responseData.toMap)
          } else {
            updatePromise.success(responseData.toMap)
          }
        case _ =>
      }
      Future.successful(())
    }
    try {
      Sub(Seq("#")).doit(session)
      val firstPubResponse = Pub(105, Seq("f"), Some("x"), false, None, None).doit(session)

      //Check Pub Response
      firstPubResponse.code should be(0)
      firstPubResponse.vts should be(2)
      firstPubResponse.cts should be(105)

      val res = promise.future.futureValue
      res.size should be(1)
      val key = res(Key("f"))

      //Check the key to be sure it is correct
      key.cts should be(firstPubResponse.cts)
      key.vts should be(firstPubResponse.vts)
      key.data should be(Some("x"))
      key.creator should be("demoUser")
      key.acl should be("$publicCreate")
      key.deletePath should be(false)

      //delete the key
      val secondPubResponse = Pub(106, Seq("f"), Some("x"), true, None, None).doit(session)

      //Check Pub Response
      secondPubResponse.code should be(0)
      secondPubResponse.vts should be(3)
      secondPubResponse.cts should be(106)

      val deleteRes = deletePromise.future.futureValue
      deleteRes.size should be(1)
      val deletedKey = deleteRes(Key("f"))
      //check the new key
      deletedKey.cts should be(secondPubResponse.cts)
      deletedKey.vts should be(secondPubResponse.vts)
      deletedKey.creator should be("demoUser")
      deletedKey.acl should be("$publicCreate")
      deletedKey.deletePath should be(true)

      val thirdPubResponse = Pub(107, Seq("f"), Some("z"), false, None, None).doit(session)

      //Check Pub Response
      thirdPubResponse.code should be(0)
      thirdPubResponse.vts should be(4)
      thirdPubResponse.cts should be(107)

      val updateRes = updatePromise.future.futureValue
      updateRes.size should be(1)
      val updateKey = updateRes(Key("f"))

      //check the new key
      updateKey.cts should be(thirdPubResponse.cts)
      updateKey.vts should be(thirdPubResponse.vts)
      updateKey.creator should be("demoUser")
      updateKey.acl should be("$publicCreate")
      updateKey.deletePath should be(false)
      updateKey.data should be(Some("z"))
    } finally {
      session.close()
    }
  }
  // scalastyle:on magic.number
}
+ */ + +package com.ibm.csync.commands + +import com.ibm.csync.database.Database +import com.ibm.csync.rabbitmq.Factory +import com.ibm.csync.session.Session +import com.ibm.csync.types.{Key, SessionId, Token} +import org.postgresql.ds.PGPoolingDataSource +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.{FunSuite, Matchers} + +import scala.collection.mutable +import scala.concurrent.{Future, Promise} + +class SubTests extends FunSuite with Matchers with ScalaFutures { + // scalastyle:off magic.number + def fakeSession(f: Response => Future[_]): Session = { + + val ds = new PGPoolingDataSource() + ds.setCurrentSchema("pg_temp") + ds.setServerName("localhost") + ds.setMaxConnections(1) + Database.createTables(ds) + + val rabbit = Factory("amqp://guest:guest@localhost:5672/testing").newConnection + + Session(ds, "", rabbit, Some("demo"), + Token("demoToken"), SessionId())(f) + } + + test("Sub on a wildcard at the end") { + + val promise = Promise[Map[Key, Data]]() + val responseData = mutable.Map[Key, Data]() + val session = fakeSession { outgoing => + outgoing match { + case d: Data => + val key = Key(d.path) + responseData(key) = d + if (responseData.keySet.size == 2) { + promise.success(responseData.toMap) + } + case _ => + } + Future.successful(()) + } + try { + Sub(Seq("a", "*")).doit(session) + Pub(99, Seq("a"), Some("x"), false, None, None).doit(session) + val firstPubResponse = Pub(100, Seq("a", "b"), Some("y"), false, None, None).doit(session) + val secondPubResponse = Pub(101, Seq("a", "c"), Some("z"), false, None, None).doit(session) + val res = promise.future.futureValue + val firstKey = res(Key("a", "b")) + val secondKey = res(Key("a", "c")) + + firstKey.cts should be(firstPubResponse.cts) + firstKey.vts should be(firstPubResponse.vts) + firstKey.data should be(Some("y")) + firstKey.creator should be("demoUser") + firstKey.acl should be("$publicCreate") + firstKey.deletePath should be(false) + + secondKey.cts should 
be(secondPubResponse.cts) + secondKey.vts should be(secondPubResponse.vts) + secondKey.data should be(Some("z")) + secondKey.creator should be("demoUser") + secondKey.acl should be("$publicCreate") + secondKey.deletePath should be(false) + } finally { + session.close() + } + } + + test("Sub on a wildcard at the beginning") { + + val promise = Promise[Map[Key, Data]]() + val responseData = mutable.Map[Key, Data]() + val session = fakeSession { outgoing => + outgoing match { + case d: Data => + val key = Key(d.path) + responseData(key) = d + if (responseData.keySet.size == 2) { + promise.success(responseData.toMap) + } + case _ => + } + Future.successful(()) + } + try { + Sub(Seq("*", "d")).doit(session) + Pub(106, Seq("g", "d", "e"), Some("z"), false, None, None).doit(session) + Pub(107, Seq("c", "f", "d"), Some("z"), false, None, None).doit(session) + Pub(102, Seq("c"), Some("x"), false, None, None).doit(session) + val firstPubResponse = Pub(103, Seq("c", "d"), Some("y"), false, None, None).doit(session) + Pub(104, Seq("e"), Some("x"), false, None, None).doit(session) + val secondPubResponse = Pub(105, Seq("f", "d"), Some("z"), false, None, None).doit(session) + + val res = promise.future.futureValue + val firstKey = res(Key("c", "d")) + val secondKey = res(Key("f", "d")) + + firstKey.cts should be(firstPubResponse.cts) + firstKey.vts should be(firstPubResponse.vts) + firstKey.data should be(Some("y")) + firstKey.creator should be("demoUser") + firstKey.acl should be("$publicCreate") + firstKey.deletePath should be(false) + + secondKey.cts should be(secondPubResponse.cts) + secondKey.vts should be(secondPubResponse.vts) + secondKey.data should be(Some("z")) + secondKey.creator should be("demoUser") + secondKey.acl should be("$publicCreate") + secondKey.deletePath should be(false) + } finally { + session.close() + } + } + // scalastyle:on magic.number +} diff --git a/core/src/test/scala/com/ibm/csync/database/PostgresTests.scala 
b/core/src/test/scala/com/ibm/csync/database/PostgresTests.scala new file mode 100644 index 0000000..1231b1e --- /dev/null +++ b/core/src/test/scala/com/ibm/csync/database/PostgresTests.scala @@ -0,0 +1,60 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.database + +import java.sql.SQLException + +import org.postgresql.ds.PGPoolingDataSource +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.{Matchers, PropSpec} + +class PostgresTests extends PropSpec with Matchers with ScalaFutures { + // scalastyle:off + //Println is throwing scalastyle errors, remove this when we remove them + property("create schema") { + + val ds = new PGPoolingDataSource + ds.setServerName("localhost") + ds.setCurrentSchema("pg_temp") + ds.setMaxConnections(1) + + Database.createTables(ds) + + val c = ds.getConnection() + try { + val s = c.createStatement() + try { + for (sql <- Database.getSQL) { + //println(s"$sql") + try { + val x = s.execute(sql) + // println(s"$x") + //println(s"${s.getUpdateCount()}") + } catch { + case ex: SQLException => //println(s"${ex.getSQLState}") + } + } + } finally { + s.close() + } + } finally { + c.close() + } + + } + // scalastyle:on +} diff --git a/core/src/test/scala/com/ibm/csync/rabbitmq/RabbitMQTests.scala b/core/src/test/scala/com/ibm/csync/rabbitmq/RabbitMQTests.scala new file mode 100644 index 0000000..a4942fb --- /dev/null +++ 
b/core/src/test/scala/com/ibm/csync/rabbitmq/RabbitMQTests.scala @@ -0,0 +1,42 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.rabbitmq + +import org.scalacheck.{Arbitrary} +import org.scalatest.{Matchers, PropSpec} +import org.scalatest.prop.{PropertyChecks} + +trait RabbitGenerators { + val genQueueInfo = for ( + id <- Arbitrary.arbString.arbitrary + ) yield QueueInfo( + id = id + ) + + //implicit val arbitraryQueueInfo = Arbitrary(genQueueInfo) +} + +class RabbitMQTests extends PropSpec with RabbitGenerators with PropertyChecks with Matchers { + // scalastyle:off null + property("queueInfo") { + forAll(genQueueInfo) { qinfo => + //Not sure what to test here, we need to write RabbitTests + qinfo.id shouldNot be(null) + } + } + // scalastyle:on null +} diff --git a/core/src/test/scala/com/ibm/csync/types/KeyTests.scala b/core/src/test/scala/com/ibm/csync/types/KeyTests.scala new file mode 100644 index 0000000..2199c08 --- /dev/null +++ b/core/src/test/scala/com/ibm/csync/types/KeyTests.scala @@ -0,0 +1,71 @@ +/* + * Copyright IBM Corporation 2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.types + +import org.scalatest.{FunSuite, Matchers} + +class KeyTests extends FunSuite with Matchers { + // scalastyle:off line.size.limit + test("Null string cannot become a key") { + assertThrows[com.ibm.csync.types.ClientError] { + Key.apply(Seq("")) + } + } + + test("String with 17 parts should fail as max number of parts is 16") { + assertThrows[com.ibm.csync.types.ClientError] { + Key.apply(Seq("a", "b", "a", "b", "a", "b", "a", "b", "a", "b", " a", "b", "a", "b", "a", "b", "a")) + } + } + + test("String with 201 characters in one part should fail as max string length is 200") { + assertThrows[com.ibm.csync.types.ClientError] { + Key.apply(Seq("aikgosmjfuyfxdtrhiwyqwosoecrqcwnvzehfasfhgespaenogteuncxxddxeyowfcormgqrvnbeskblprelxlpolfvowpsibtolhcmypgnlnfekrqufckwrszusrwmhxeltcgzodnlnlxeualeqiaxbujdnfqalhzyatcqtmtpltlrbzkeohaibmqpowbcftpyicspca")) + } + } + + test("String with 200 characters in one part should pass as max string length is 200") { + try { + val key = Key.apply(Seq("ikgosmjfuyfxdtrhiwyqwosoecrqcwnvzehfasfhgespaenogteuncxxddxeyowfcormgqrvnbeskblprelxlpolfvowpsibtolhcmypgnlnfekrqufckwrszusrwmhxeltcgzodnlnlxeualeqiaxbujdnfqalhzyatcqtmtpltlrbzkeohaibmqpowbcftpyicspca")) + assert(key.asString == "ikgosmjfuyfxdtrhiwyqwosoecrqcwnvzehfasfhgespaenogteuncxxddxeyowfcormgqrvnbeskblprelxlpolfvowpsibtolhcmypgnlnfekrqufckwrszusrwmhxeltcgzodnlnlxeualeqiaxbujdnfqalhzyatcqtmtpltlrbzkeohaibmqpowbcftpyicspca") + } catch { + case _: Throwable => fail() + } + } + + test("String with 200 (201 with the .) 
characters in two parts should fail as max string length is 200") { + assertThrows[com.ibm.csync.types.ClientError] { + Key.apply(Seq("aikgosmjfuyfxdtrhiwyqwosoecrqcwnvzehfasfhgespaenogteun", "cxxddxeyowfcormgqrvnbeskblprelxlpolfvowpsibtolhcmypgnlnfekrqufckwrszusrwmhxeltcgzodnlnlxeualeqiaxbujdnfqalhzyatcqtmtpltlrbzkeohaibmqpowbcftpyicspca")) + } + } + + test("String with 199(200 with the .) characters in two parts should pass as max string length is 200 ") { + try { + val key = Key.apply(Seq("kgosmjfuyfxdtrhiwyqwosoecrqcwnvzeh", "fasfhgespaenogteuncxxddxeyowfcormgqrvnbeskblprelxlpolfvowpsibtolhcmypgnlnfekrqufckwrszusrwmhxeltcgzodnlnlxeualeqiaxbujdnfqalhzyatcqtmtpltlrbzkeohaibmqpowbcftpyicspca")) + assert(key.asString == "kgosmjfuyfxdtrhiwyqwosoecrqcwnvzeh.fasfhgespaenogteuncxxddxeyowfcormgqrvnbeskblprelxlpolfvowpsibtolhcmypgnlnfekrqufckwrszusrwmhxeltcgzodnlnlxeualeqiaxbujdnfqalhzyatcqtmtpltlrbzkeohaibmqpowbcftpyicspca") + } catch { + case _: Throwable => fail() + } + } + + test("String with no parts should throw error") { + assertThrows[com.ibm.csync.types.ClientError] { + val key = Key.apply(Seq()) + } + } + // scalastyle:on line.size.limit +} diff --git a/core/src/test/scala/com/ibm/csync/types/TestPart.scala b/core/src/test/scala/com/ibm/csync/types/TestPart.scala new file mode 100644 index 0000000..8fe9177 --- /dev/null +++ b/core/src/test/scala/com/ibm/csync/types/TestPart.scala @@ -0,0 +1,100 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.types + +import org.scalacheck.{Gen} +import org.scalatest.prop.PropertyChecks +import org.scalatest.{Matchers, PropSpec} + +import scala.util.{Success, Try} + +object TestPart { + // scalastyle:off magic.number + val idInsideGen = Gen.listOf( + Gen.frequency( + 2 -> Gen.numChar, + 5 -> Gen.alphaLowerChar, + 4 -> Gen.alphaUpperChar, + 1 -> Gen.const('-'), + 1 -> Gen.const('_') + ) + ) + + val idGen = for ( + first <- Gen.alphaLowerChar; + rest <- idInsideGen + ) yield (first +: rest).mkString + + val idPartGen = idGen map { id => Identifier(id) } + + val innerGen = Gen.frequency( + 6 -> idPartGen, + 2 -> Star + ) + + val partGen = Gen.frequency( + 10 -> innerGen, + 1 -> Pound + ) + +} + +class TestPart extends PropSpec with PropertyChecks with Matchers { + + import TestPart._ + + property("basic parts") { + Star.asString should be("*") + Pound.asString should be("#") + forAll { (s: String) => + Identifier(s).asString should be(s) + } + } + + property("parse parts") { + + Seq("zebra", "Zebra", "aA-", "aA0_-") foreach { x => + Try(Part(x)) should be(Success(Identifier(x))) + } + + noException should be thrownBy { + Part("*") should be(Star) + Part("#") should be(Pound) + } + + Seq("?", "**", "abc^", "A b", "a*", "b#") foreach { x => + val e = the[ClientError] thrownBy { + Part(x) + } + e.code should be(ResponseCode.InvalidPathFormat) + e.msg should be(Some(x)) + } + + noException should be thrownBy { + forAll(idGen) { p => + Part(p) should be(Identifier(p)) + } + } + } + + property("random parts") { + forAll(partGen) { p => + p should be(Part(p.asString)) + } + } + // scalastyle:on magic.number +} diff --git a/core/src/test/scala/com/ibm/csync/types/TestPattern.scala b/core/src/test/scala/com/ibm/csync/types/TestPattern.scala new file mode 100644 index 0000000..cbc0160 --- /dev/null +++ 
b/core/src/test/scala/com/ibm/csync/types/TestPattern.scala @@ -0,0 +1,71 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.types + +import org.scalacheck.Gen +import org.scalatest.{Matchers, PropSpec} +import org.scalatest.prop.PropertyChecks + +import scala.util.{Failure, Success, Try} + +case class PatternInfo(vals: Seq[Option[String]], isStrict: Boolean) { + +} +// scalastyle:off magic.number null +object TestPattern { + + val starOrId = Gen.frequency( + (8, TestPart.idGen map { Some(_) }), + (2, None) + ) + val infoGen = for ( + n <- Gen.choose(0, 16); + lst <- Gen.listOf(starOrId); + isStrict <- Gen.oneOf(true, false) + ) yield PatternInfo(lst, isStrict) + +} + +class TestPattern extends PropSpec with PropertyChecks with Matchers { + + val patternGen = Gen.listOf(TestPart.partGen) map { new Pattern(_) } + + property("simple patterns") { + Try { Pattern(Seq[String]()) } match { + case Failure(e) => e should not be (null) + case Success(x) => fail(x.toString) + } + } + + property("arbitrary patterns") { + forAll(patternGen) { p => + p.asString should not be (null) + } + } + + property("arbitrary patterns to string") { + forAll(patternGen) { p => + p.asWhere should not be (null) + } + } + + property("query generator") { + //Need to properly test this + //printf("%s\n", Pattern(Seq("a", "*", "c", "#")).asWhere) + } + // scalastyle:on magic.number null +} diff --git 
a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..92adbbb --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,130 @@ +FROM openjdk:8-jdk + +#FROM https://github.com/docker-library/rabbitmq/blob/29121864d4892b2481706df023a53e31fececd02/Dockerfile + +# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added +RUN groupadd -r rabbitmq && useradd -r -d /var/lib/rabbitmq -m -g rabbitmq rabbitmq + +# grab gosu for easy step-down from root +ENV GOSU_VERSION 1.7 +RUN set -x \ + && apt-get update && apt-get install -y --no-install-recommends ca-certificates wget && rm -rf /var/lib/apt/lists/* \ + && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)" \ + && wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc" \ + && export GNUPGHOME="$(mktemp -d)" \ + && gpg --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \ + && gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \ + && rm -r "$GNUPGHOME" /usr/local/bin/gosu.asc \ + && chmod +x /usr/local/bin/gosu \ + && gosu nobody true \ + && apt-get purge -y --auto-remove wget + +# Add the officially endorsed Erlang debian repository: +# See: +# - http://www.erlang.org/download.html +# - https://www.erlang-solutions.com/resources/download.html +RUN apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys 434975BD900CCBE4F7EE1B1ED208507CA14F4FCA +RUN echo 'deb http://packages.erlang-solutions.com/debian jessie contrib' > /etc/apt/sources.list.d/erlang.list + +# install Erlang +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + erlang-asn1 \ + erlang-base-hipe \ + erlang-crypto \ + erlang-eldap \ + erlang-inets \ + erlang-mnesia \ + erlang-nox \ + erlang-os-mon \ + erlang-public-key \ + erlang-ssl \ + 
erlang-xmerl \ + && rm -rf /var/lib/apt/lists/* + +# get logs to stdout (thanks @dumbbell for pushing this upstream! :D) +ENV RABBITMQ_LOGS=- RABBITMQ_SASL_LOGS=- +# https://github.com/rabbitmq/rabbitmq-server/commit/53af45bf9a162dec849407d114041aad3d84feaf + +# http://www.rabbitmq.com/install-debian.html +# "Please note that the word testing in this line refers to the state of our release of RabbitMQ, not any particular Debian distribution." +RUN apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys 0A9AF2115F4687BD29803A206B73A36E6026DFCA +RUN echo 'deb http://www.rabbitmq.com/debian testing main' > /etc/apt/sources.list.d/rabbitmq.list + +ENV RABBITMQ_VERSION 3.6.6 +ENV RABBITMQ_DEBIAN_VERSION 3.6.6-1 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + rabbitmq-server=$RABBITMQ_DEBIAN_VERSION \ + && rm -rf /var/lib/apt/lists/* + +# /usr/sbin/rabbitmq-server has some irritating behavior, and only exists to "su - rabbitmq /usr/lib/rabbitmq/bin/rabbitmq-server ..." +ENV PATH /usr/lib/rabbitmq/bin:$PATH + +RUN echo '[ { rabbit, [ { loopback_users, [ ] } ] } ].' > /etc/rabbitmq/rabbitmq.config + +# set home so that any `--user` knows where to put the erlang cookie +ENV HOME /var/lib/rabbitmq + +RUN mkdir -p /var/lib/rabbitmq /etc/rabbitmq \ + && chown -R rabbitmq:rabbitmq /var/lib/rabbitmq /etc/rabbitmq \ + && chmod 777 /var/lib/rabbitmq /etc/rabbitmq +VOLUME /var/lib/rabbitmq + +# add a symlink to the .erlang.cookie in /root so we can "docker exec rabbitmqctl ..." 
without gosu +RUN ln -sf /var/lib/rabbitmq/.erlang.cookie /root/ + +RUN ln -sf /usr/lib/rabbitmq/lib/rabbitmq_server-$RABBITMQ_VERSION/plugins /plugins + +COPY docker-entrypoint-rabbit.sh / + +EXPOSE 4369 5671 5672 25672 + +# FROM https://github.com/docker-library/postgres/blob/e4942cb0f79b61024963dc0ac196375b26fa60dd/9.6/Dockerfile +# explicitly set user/group IDs +RUN groupadd -r postgres && useradd -r -g postgres postgres + +# make the "en_US.UTF-8" locale so postgres will be utf-8 enabled by default +RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ + && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 +ENV LANG en_US.utf8 + +RUN mkdir /docker-entrypoint-initdb.d + +RUN apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 + +ENV PG_MAJOR 9.6 +ENV PG_VERSION 9.6.1-2.pgdg80+1 + +RUN echo 'deb http://apt.postgresql.org/pub/repos/apt/ jessie-pgdg main' $PG_MAJOR > /etc/apt/sources.list.d/pgdg.list + +RUN apt-get update \ + && apt-get install -y postgresql-common \ + && sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf \ + && apt-get install -y \ + postgresql-$PG_MAJOR=$PG_VERSION \ + postgresql-contrib-$PG_MAJOR=$PG_VERSION \ + && rm -rf /var/lib/apt/lists/* + +# make the sample config easier to munge (and "correct by default") +RUN mv -v /usr/share/postgresql/$PG_MAJOR/postgresql.conf.sample /usr/share/postgresql/ \ + && ln -sv ../postgresql.conf.sample /usr/share/postgresql/$PG_MAJOR/ \ + && sed -ri "s!^#?(listen_addresses)\s*=\s*\S+.*!\1 = '*'!" 
/usr/share/postgresql/postgresql.conf.sample + +RUN mkdir -p /var/run/postgresql && chown -R postgres /var/run/postgresql + +ENV PATH /usr/lib/postgresql/$PG_MAJOR/bin:$PATH +ENV PGDATA /var/lib/postgresql/data +VOLUME /var/lib/postgresql/data + +COPY docker-entrypoint-postgres.sh / + +EXPOSE 5432 + +COPY csync.sh / +ENTRYPOINT ["/csync.sh"] +CMD [""] + +USER root + +EXPOSE 6005 diff --git a/docker/csync.sh b/docker/csync.sh new file mode 100755 index 0000000..b6c01c4 --- /dev/null +++ b/docker/csync.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +# +# Copyright IBM Corporation 2016-2017 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +echo "Starting RABBITMQ" +#work around to avoid error when restarting rabbit: invoke-rc.d: policy-rc.d denied execution of start. 
+sed -i -e 's/101/0/g' /usr/sbin/policy-rc.d +/docker-entrypoint-rabbit.sh rabbitmq-server & +sleep 5 +rabbitmq-plugins enable rabbitmq_management +rabbitmqctl stop +invoke-rc.d rabbitmq-server start +sleep 5 +echo "Finished starting RABBITMQ" + +find / -name rabbitmqctl -print +if [ -z $CSYNC_RABBITMQ_PASSWORD ]; then + echo "CSYNC_RABBITMQ_PASSWORD not specified, using default value" + export CSYNC_RABBITMQ_PASSWORD=guest +fi +if [ -z $CSYNC_RABBITMQ_USER ]; then + echo "CSYNC_RABBITMQ_USER not specified, using default value" + export CSYNC_RABBITMQ_USER=guest +fi + +rabbitmqctl add_user admin admin +rabbitmqctl set_user_tags admin administrator +rabbitmqctl add_user $CSYNC_RABBITMQ_USER $CSYNC_RABBITMQ_PASSWORD +rabbitmqctl add_vhost csync +rabbitmqctl set_permissions -p csync $CSYNC_RABBITMQ_USER ".*" ".*" ".*" +rabbitmqctl set_permissions -p csync admin ".*" ".*" ".*" + +echo "Starting postgres" +export LANG=en_US.utf8 +/docker-entrypoint-postgres.sh postgres & +sleep 7 +echo "Finished starting postgres" +ls -l /var/lib/postgresql/data + +rm -f /opt/docker/RUNNING_PID + +sed -i "s/YOUR_GOOGLE_CLIENT_ID_HERE/"$CSYNC_GOOGLE_CLIENT_IDS"/" /opt/docker/public/dataviewer/ui/bundle.js + +echo "Starting CSync" +export USER=postgres +su -m postgres -c /opt/docker/bin/csync + diff --git a/docker/docker-entrypoint-postgres.sh b/docker/docker-entrypoint-postgres.sh new file mode 100755 index 0000000..6f6ecba --- /dev/null +++ b/docker/docker-entrypoint-postgres.sh @@ -0,0 +1,126 @@ +#!/bin/bash + +# +# Copyright IBM Corporation 2016-2017 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -e + +set_listen_addresses() { + sedEscapedValue="$(echo "$1" | sed 's/[\/&]/\\&/g')" + sed -ri "s/^#?(listen_addresses\s*=\s*)\S+/\1'$sedEscapedValue'/" "$PGDATA/postgresql.conf" +} + +if [ "$1" = 'postgres' ]; then + mkdir -p "$PGDATA" + chown -R postgres "$PGDATA" + + chmod g+s /run/postgresql + chown -R postgres /run/postgresql + + # look specifically for PG_VERSION, as it is expected in the DB dir + if [ ! -s "$PGDATA/PG_VERSION" ]; then + gosu postgres initdb + + # check password first so we can output the warning before postgres + # messes it up + if [ "$POSTGRES_PASSWORD" ]; then + pass="PASSWORD '$POSTGRES_PASSWORD'" + authMethod=md5 + else + # The - option suppresses leading tabs but *not* spaces. :) + cat >&2 <<-'EOWARN' + **************************************************** + WARNING: No password has been set for the database. + This will allow anyone with access to the + Postgres port to access your database. In + Docker's default configuration, this is + effectively any other container on the same + system. + + Use "-e POSTGRES_PASSWORD=password" to set + it in "docker run". + **************************************************** + EOWARN + + pass= + authMethod=trust + fi + + { echo; echo "host all all 0.0.0.0/0 $authMethod"; } >> "$PGDATA/pg_hba.conf" + + set_listen_addresses '' # we're going to start up postgres, but it's not ready for use yet (this is initialization), so don't listen to the outside world yet + + gosu postgres "$@" & + pid="$!" 
+ for i in {30..0}; do + if echo 'SELECT 1' | psql --username postgres &> /dev/null; then + break + fi + echo 'PostgreSQL init process in progress...' + sleep 1 + done + if [ "$i" = 0 ]; then + echo >&2 'PostgreSQL init process failed' + exit 1 + fi + + : ${POSTGRES_USER:=postgres} + : ${POSTGRES_DB:=$POSTGRES_USER} + + if [ "$POSTGRES_DB" != 'postgres' ]; then + psql --username postgres <<-EOSQL + CREATE DATABASE "$POSTGRES_DB" ; + EOSQL + echo + fi + + if [ "$POSTGRES_USER" = 'postgres' ]; then + op='ALTER' + else + op='CREATE' + fi + + psql --username postgres <<-EOSQL + $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ; + EOSQL + echo + + echo + for f in /docker-entrypoint-initdb.d/*; do + case "$f" in + *.sh) echo "$0: running $f"; . "$f" ;; + *.sql) echo "$0: running $f"; psql --username postgres --dbname "$POSTGRES_DB" < "$f" && echo ;; + *) echo "$0: ignoring $f" ;; + esac + echo + done + + if ! kill -s TERM "$pid" || ! wait "$pid"; then + echo >&2 'PostgreSQL init process failed' + exit 1 + fi + + set_listen_addresses '*' + + echo + echo 'PostgreSQL init process complete; ready for start up.' + echo + fi + + exec gosu postgres "$@" +fi + +exec "$@" diff --git a/docker/docker-entrypoint-rabbit.sh b/docker/docker-entrypoint-rabbit.sh new file mode 100755 index 0000000..5d70246 --- /dev/null +++ b/docker/docker-entrypoint-rabbit.sh @@ -0,0 +1,297 @@ +#!/bin/bash + +# +# Copyright IBM Corporation 2016-2017 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -eu + +# allow the container to be started with `--user` +if [[ "$1" == rabbitmq* ]] && [ "$(id -u)" = '0' ]; then + if [ "$1" = 'rabbitmq-server' ]; then + chown -R rabbitmq /var/lib/rabbitmq + fi + exec gosu rabbitmq "$BASH_SOURCE" "$@" +fi + +# backwards compatibility for old environment variables +: "${RABBITMQ_SSL_CERTFILE:=${RABBITMQ_SSL_CERT_FILE:-}}" +: "${RABBITMQ_SSL_KEYFILE:=${RABBITMQ_SSL_KEY_FILE:-}}" +: "${RABBITMQ_SSL_CACERTFILE:=${RABBITMQ_SSL_CA_FILE:-}}" + +# "management" SSL config should default to using the same certs +: "${RABBITMQ_MANAGEMENT_SSL_CACERTFILE:=$RABBITMQ_SSL_CACERTFILE}" +: "${RABBITMQ_MANAGEMENT_SSL_CERTFILE:=$RABBITMQ_SSL_CERTFILE}" +: "${RABBITMQ_MANAGEMENT_SSL_KEYFILE:=$RABBITMQ_SSL_KEYFILE}" + +# https://www.rabbitmq.com/configure.html +sslConfigKeys=( + cacertfile + certfile + fail_if_no_peer_cert + keyfile + verify +) +managementConfigKeys=( + "${sslConfigKeys[@]/#/ssl_}" +) +rabbitConfigKeys=( + default_pass + default_user + default_vhost + hipe_compile +) +fileConfigKeys=( + management_ssl_cacertfile + management_ssl_certfile + management_ssl_keyfile + ssl_cacertfile + ssl_certfile + ssl_keyfile +) +allConfigKeys=( + "${managementConfigKeys[@]/#/management_}" + "${rabbitConfigKeys[@]}" + "${sslConfigKeys[@]/#/ssl_}" +) + +declare -A configDefaults=( + [management_ssl_fail_if_no_peer_cert]='false' + [management_ssl_verify]='verify_none' + + [ssl_fail_if_no_peer_cert]='true' + [ssl_verify]='verify_peer' +) + +haveConfig= +haveSslConfig= +haveManagementSslConfig= +for conf in "${allConfigKeys[@]}"; do + var="RABBITMQ_${conf^^}" + val="${!var:-}" + if [ "$val" ]; then + haveConfig=1 + case "$conf" in + ssl_*) haveSslConfig=1 ;; + management_ssl_*) haveManagementSslConfig=1 ;; + esac + fi +done +if [ "$haveSslConfig" ]; then + missing=() + for sslConf in cacertfile certfile keyfile; do + var="RABBITMQ_SSL_${sslConf^^}" + val="${!var}" + if [ -z "$val" ]; then + missing+=( "$var" ) + fi + done + if [ "${#missing[@]}" 
-gt 0 ]; then + { + echo + echo 'error: SSL requested, but missing required configuration' + for miss in "${missing[@]}"; do + echo " - $miss" + done + echo + } >&2 + exit 1 + fi +fi +missingFiles=() +for conf in "${fileConfigKeys[@]}"; do + var="RABBITMQ_${conf^^}" + val="${!var}" + if [ "$val" ] && [ ! -f "$val" ]; then + missingFiles+=( "$val ($var)" ) + fi +done +if [ "${#missingFiles[@]}" -gt 0 ]; then + { + echo + echo 'error: files specified, but missing' + for miss in "${missingFiles[@]}"; do + echo " - $miss" + done + echo + } >&2 + exit 1 +fi + +# set defaults for missing values (but only after we're done with all our checking so we don't throw any of that off) +for conf in "${!configDefaults[@]}"; do + default="${configDefaults[$conf]}" + var="RABBITMQ_${conf^^}" + [ -z "${!var:-}" ] || continue + eval "export $var=\"\$default\"" +done + +# If long & short hostnames are not the same, use long hostnames +if [ "$(hostname)" != "$(hostname -s)" ]; then + : "${RABBITMQ_USE_LONGNAME:=true}" +fi + +if [ "${RABBITMQ_ERLANG_COOKIE:-}" ]; then + cookieFile='/var/lib/rabbitmq/.erlang.cookie' + if [ -e "$cookieFile" ]; then + if [ "$(cat "$cookieFile" 2>/dev/null)" != "$RABBITMQ_ERLANG_COOKIE" ]; then + echo >&2 + echo >&2 "warning: $cookieFile contents do not match RABBITMQ_ERLANG_COOKIE" + echo >&2 + fi + else + echo "$RABBITMQ_ERLANG_COOKIE" > "$cookieFile" + chmod 600 "$cookieFile" + fi +fi + +# prints "$2$1$3$1...$N" +join() { + local sep="$1"; shift + local out; printf -v out "${sep//%/%%}%s" "$@" + echo "${out#$sep}" +} +indent() { + if [ "$#" -gt 0 ]; then + echo "$@" + else + cat + fi | sed 's/^/\t/g' +} +rabbit_array() { + echo -n '[' + case "$#" in + 0) echo -n ' ' ;; + 1) echo -n " $1 " ;; + *) + local vals="$(join $',\n' "$@")" + echo + indent "$vals" + esac + echo -n ']' +} +rabbit_env_config() { + local prefix="$1"; shift + + local ret=() + local conf + for conf; do + local var="rabbitmq${prefix:+_$prefix}_$conf" + var="${var^^}" + + local 
val="${!var:-}" + + local rawVal= + case "$conf" in + verify|fail_if_no_peer_cert) + [ "$val" ] || continue + rawVal="$val" + ;; + + hipe_compile) + [ "$val" ] && rawVal='true' || rawVal='false' + ;; + + cacertfile|certfile|keyfile) + [ "$val" ] || continue + rawVal='"'"$val"'"' + ;; + + *) + [ "$val" ] || continue + rawVal='<<"'"$val"'">>' + ;; + esac + [ "$rawVal" ] || continue + + ret+=( "{ $conf, $rawVal }" ) + done + + join $'\n' "${ret[@]}" +} + +if [ "$1" = 'rabbitmq-server' ] && [ "$haveConfig" ]; then + fullConfig=() + + rabbitConfig=( + "{ loopback_users, $(rabbit_array) }" + ) + + if [ "$haveSslConfig" ]; then + IFS=$'\n' + rabbitSslOptions=( $(rabbit_env_config 'ssl' "${sslConfigKeys[@]}") ) + unset IFS + + rabbitConfig+=( + "{ tcp_listeners, $(rabbit_array) }" + "{ ssl_listeners, $(rabbit_array 5671) }" + "{ ssl_options, $(rabbit_array "${rabbitSslOptions[@]}") }" + ) + else + rabbitConfig+=( + "{ tcp_listeners, $(rabbit_array 5672) }" + "{ ssl_listeners, $(rabbit_array) }" + ) + fi + + IFS=$'\n' + rabbitConfig+=( $(rabbit_env_config '' "${rabbitConfigKeys[@]}") ) + unset IFS + + fullConfig+=( "{ rabbit, $(rabbit_array "${rabbitConfig[@]}") }" ) + + # If management plugin is installed, then generate config consider this + if [ "$(rabbitmq-plugins list -m -e rabbitmq_management)" ]; then + if [ "$haveManagementSslConfig" ]; then + IFS=$'\n' + rabbitManagementSslOptions=( $(rabbit_env_config 'management_ssl' "${sslConfigKeys[@]}") ) + unset IFS + + rabbitManagementListenerConfig+=( + '{ port, 15671 }' + '{ ssl, true }' + "{ ssl_opts, $(rabbit_array "${rabbitManagementSslOptions[@]}") }" + ) + else + rabbitManagementListenerConfig+=( + '{ port, 15672 }' + '{ ssl, false }' + ) + fi + + fullConfig+=( + "{ rabbitmq_management, $(rabbit_array "{ listener, $(rabbit_array "${rabbitManagementListenerConfig[@]}") }") }" + ) + fi + + echo "$(rabbit_array "${fullConfig[@]}")." 
> /etc/rabbitmq/rabbitmq.config +fi + +combinedSsl='/tmp/combined.pem' +if [ "$haveSslConfig" ] && [[ "$1" == rabbitmq* ]] && [ ! -f "$combinedSsl" ]; then + # Create combined cert + cat "$RABBITMQ_SSL_CERTFILE" "$RABBITMQ_SSL_KEYFILE" > "$combinedSsl" + chmod 0400 "$combinedSsl" +fi +if [ "$haveSslConfig" ] && [ -f "$combinedSsl" ]; then + # More ENV vars for make clustering happiness + # we don't handle clustering in this script, but these args should ensure + # clustered SSL-enabled members will talk nicely + export ERL_SSL_PATH="$(erl -eval 'io:format("~p", [code:lib_dir(ssl, ebin)]),halt().' -noshell)" + export RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-pa $ERL_SSL_PATH -proto_dist inet_tls -ssl_dist_opt server_certfile $combinedSsl -ssl_dist_opt server_secure_renegotiate true client_secure_renegotiate true" + export RABBITMQ_CTL_ERL_ARGS="$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" +fi + +exec "$@" diff --git a/npmgulp.sh b/npmgulp.sh new file mode 100755 index 0000000..5b02162 --- /dev/null +++ b/npmgulp.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +cd ./vertx/public/dataviewer/ && npm install && gulp diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 0000000..b168a33 --- /dev/null +++ b/project/build.properties @@ -0,0 +1 @@ +sbt.version = 0.13.13 diff --git a/project/plugins.sbt b/project/plugins.sbt new file mode 100644 index 0000000..bcb79f8 --- /dev/null +++ b/project/plugins.sbt @@ -0,0 +1,16 @@ +logLevel := Level.Warn + +resolvers += "sonatype-releases" at "https://oss.sonatype.org/content/repositories/releases/" + +addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.1.5") + +//Auto Formatting Scalariform plugin. 
+//resolvers += Resolver.typesafeRepo("releases") + +addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.6.0") + +// The ScalaStyle plugin +addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "0.8.0") + +// The SBT-Scoverage Code Coverage Plugin +addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.5.0") diff --git a/scalastyle-config.xml b/scalastyle-config.xml new file mode 100644 index 0000000..8971c87 --- /dev/null +++ b/scalastyle-config.xml @@ -0,0 +1,104 @@ + + Scalastyle standard configuration + + + + + + + + + true + + + + d + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vertx/public/__init__.py b/vertx/public/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vertx/public/dataviewer/DEPENDENCIES.md b/vertx/public/dataviewer/DEPENDENCIES.md new file mode 100644 index 0000000..0ded9ba --- /dev/null +++ b/vertx/public/dataviewer/DEPENDENCIES.md @@ -0,0 +1,17 @@ +| Module Name | Publisher | Date Published | Version | GitHub | License | +|:------------| ----------| ---------------| --------| -------| -------:| +| jquery | timmywil | 2016-09-22T22:32:49.360Z | 3.1.1 | [github.com/jquery/jquery](https://github.com/jquery/jquery) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-minify-css | murphydanger | 2016-02-24T10:25:03.149Z | 1.2.4 | | [MIT](http://spdx.org/licenses/MIT) | +| knockout | mbest | 2016-11-08T07:13:32.816Z | 3.4.1 | [github.com/knockout/knockout](https://github.com/knockout/knockout) | [MIT](http://spdx.org/licenses/MIT) | +| split.js | nathancahill | 2016-12-29T17:56:16.783Z | 1.2.0 | [github.com/nathancahill/Split.js](https://github.com/nathancahill/Split.js) | [MIT](http://spdx.org/licenses/MIT) | +| jstree | vakata | 2016-10-31T09:53:31.253Z | 3.3.3 | [github.com/vakata/jstree](https://github.com/vakata/jstree) | [MIT](http://spdx.org/licenses/MIT) | +| gulp | phated | 
2016-02-08T18:50:16.472Z | 3.9.1 | [github.com/gulpjs/gulp](https://github.com/gulpjs/gulp) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-concat | contra | 2016-11-13T18:53:13.734Z | 2.6.1 | [github.com/contra/gulp-concat](https://github.com/contra/gulp-concat) | [MIT](http://spdx.org/licenses/MIT) | +| webworkify | anandthakker | 2016-09-07T15:16:32.281Z | 1.4.0 | [github.com/substack/webworkify](https://github.com/substack/webworkify) | [MIT](http://spdx.org/licenses/MIT) | +| mustache | dasilvacontin | 2016-11-08T16:25:18.753Z | 2.3.0 | [github.com/janl/mustache.js](https://github.com/janl/mustache.js) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-streamify | nfroidure | 2015-09-07T10:30:56.550Z | 1.0.2 | [github.com/nfroidure/gulp-streamify](https://github.com/nfroidure/gulp-streamify) | [MIT](http://spdx.org/licenses/MIT) | +| vinyl-source-stream | hughsk | 2015-03-06T06:41:43.495Z | 1.1.0 | [github.com/hughsk/vinyl-source-stream](https://github.com/hughsk/vinyl-source-stream) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-uglify | terinjokes | 2016-08-01T21:55:41.164Z | 2.0.0 | [github.com/terinjokes/gulp-uglify](https://github.com/terinjokes/gulp-uglify) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-cli | phated | 2016-07-15T17:58:18.086Z | 1.2.2 | [github.com/gulpjs/gulp-cli](https://github.com/gulpjs/gulp-cli) | [MIT](http://spdx.org/licenses/MIT) | +| grunt | shama | 2016-04-05T18:16:49.769Z | 1.0.1 | [github.com/gruntjs/grunt](https://github.com/gruntjs/grunt) | [MIT](http://spdx.org/licenses/MIT) | +| browserify | substack | 2017-01-04T08:10:34.289Z | 13.3.0 | [github.com/substack/node-browserify](https://github.com/substack/node-browserify) | [MIT](http://spdx.org/licenses/MIT) | diff --git a/vertx/public/dataviewer/README.md b/vertx/public/dataviewer/README.md new file mode 100644 index 0000000..d334ddb --- /dev/null +++ b/vertx/public/dataviewer/README.md @@ -0,0 +1,43 @@ +#Csync Dataviewer + +The csync dataviewer visualizes the CSync 
hierarchy. + +###Accessing the Dataviewer + +The CSync Dataviewer is available on the same host and port that the CSync instance is run. For example, if you are running CSync locally on port 6005, the dataviewer is available on `localhost:6005` + + +###Using the Dataviewer + +```Add Node``` +To add a node, select a parent node and click the `Add Node` button on the right hand side. This will add a new node under the currently selected node and the name will be highlighted for you to type in. After typing in a new name for the node, hit enter to finish adding. + +Our naming conventions do not let you have spaces or any special characters in the node name. For example, "Hello There" and "hello!!!" will not work; "helloThere" will work. Also you will not be allowed to add a node name that currently already exists for that parent, so you will be prompted to change the name if that is the case. +**Note:** implementation for adding a root node is in progress + +```Delete Node``` +Select a node to be deleted and press the `Delete Node` button. **Note:** we only allow deleting leaf nodes. + +```Modify Node data``` +When a node is selected, the data associated with it (if any) will be shown in the Properties panel on the right. Click the `Edit` button on the right top corner to add/modify data for that node. Click `Save` to save. If you click away from the Properties section without saving, you will be prompted to finish editing, then continue. 
+ + +### Dependency Table + +| Module Name | Publisher | Date Published | Version | GitHub | License | +|:------------| ----------| ---------------| --------| -------| -------:| +| jquery | timmywil | 2016-09-22T22:32:49.360Z | 3.1.1 | [github.com/jquery/jquery](https://github.com/jquery/jquery) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-minify-css | murphydanger | 2016-02-24T10:25:03.149Z | 1.2.4 | | [MIT](http://spdx.org/licenses/MIT) | +| knockout | mbest | 2016-11-08T07:13:32.816Z | 3.4.1 | [github.com/knockout/knockout](https://github.com/knockout/knockout) | [MIT](http://spdx.org/licenses/MIT) | +| split.js | nathancahill | 2016-12-29T17:56:16.783Z | 1.2.0 | [github.com/nathancahill/Split.js](https://github.com/nathancahill/Split.js) | [MIT](http://spdx.org/licenses/MIT) | +| jstree | vakata | 2016-10-31T09:53:31.253Z | 3.3.3 | [github.com/vakata/jstree](https://github.com/vakata/jstree) | [MIT](http://spdx.org/licenses/MIT) | +| gulp | phated | 2016-02-08T18:50:16.472Z | 3.9.1 | [github.com/gulpjs/gulp](https://github.com/gulpjs/gulp) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-concat | contra | 2016-11-13T18:53:13.734Z | 2.6.1 | [github.com/contra/gulp-concat](https://github.com/contra/gulp-concat) | [MIT](http://spdx.org/licenses/MIT) | +| webworkify | anandthakker | 2016-09-07T15:16:32.281Z | 1.4.0 | [github.com/substack/webworkify](https://github.com/substack/webworkify) | [MIT](http://spdx.org/licenses/MIT) | +| mustache | dasilvacontin | 2016-11-08T16:25:18.753Z | 2.3.0 | [github.com/janl/mustache.js](https://github.com/janl/mustache.js) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-streamify | nfroidure | 2015-09-07T10:30:56.550Z | 1.0.2 | [github.com/nfroidure/gulp-streamify](https://github.com/nfroidure/gulp-streamify) | [MIT](http://spdx.org/licenses/MIT) | +| vinyl-source-stream | hughsk | 2015-03-06T06:41:43.495Z | 1.1.0 | [github.com/hughsk/vinyl-source-stream](https://github.com/hughsk/vinyl-source-stream) | 
[MIT](http://spdx.org/licenses/MIT) | +| gulp-uglify | terinjokes | 2016-08-01T21:55:41.164Z | 2.0.0 | [github.com/terinjokes/gulp-uglify](https://github.com/terinjokes/gulp-uglify) | [MIT](http://spdx.org/licenses/MIT) | +| gulp-cli | phated | 2016-07-15T17:58:18.086Z | 1.2.2 | [github.com/gulpjs/gulp-cli](https://github.com/gulpjs/gulp-cli) | [MIT](http://spdx.org/licenses/MIT) | +| grunt | shama | 2016-04-05T18:16:49.769Z | 1.0.1 | [github.com/gruntjs/grunt](https://github.com/gruntjs/grunt) | [MIT](http://spdx.org/licenses/MIT) | +| browserify | substack | 2017-01-04T08:10:34.289Z | 13.3.0 | [github.com/substack/node-browserify](https://github.com/substack/node-browserify) | [MIT](http://spdx.org/licenses/MIT) | diff --git a/vertx/public/dataviewer/__init__.py b/vertx/public/dataviewer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vertx/public/dataviewer/assets/add.png b/vertx/public/dataviewer/assets/add.png new file mode 100644 index 0000000..8ce7107 Binary files /dev/null and b/vertx/public/dataviewer/assets/add.png differ diff --git a/vertx/public/dataviewer/assets/analytics.png b/vertx/public/dataviewer/assets/analytics.png new file mode 100644 index 0000000..99cdf6c Binary files /dev/null and b/vertx/public/dataviewer/assets/analytics.png differ diff --git a/vertx/public/dataviewer/assets/data-viewer.png b/vertx/public/dataviewer/assets/data-viewer.png new file mode 100644 index 0000000..f4fc69a Binary files /dev/null and b/vertx/public/dataviewer/assets/data-viewer.png differ diff --git a/vertx/public/dataviewer/assets/delete.png b/vertx/public/dataviewer/assets/delete.png new file mode 100644 index 0000000..93cf9f0 Binary files /dev/null and b/vertx/public/dataviewer/assets/delete.png differ diff --git a/vertx/public/dataviewer/assets/edit-properties.png b/vertx/public/dataviewer/assets/edit-properties.png new file mode 100644 index 0000000..cf746dd Binary files /dev/null and b/vertx/public/dataviewer/assets/edit-properties.png 
differ diff --git a/vertx/public/dataviewer/config.js b/vertx/public/dataviewer/config.js new file mode 100644 index 0000000..38677c4 --- /dev/null +++ b/vertx/public/dataviewer/config.js @@ -0,0 +1,7 @@ +exports.useSSL = false; +exports.pathToListen = '#'; +exports.demoProvider = 'demo';//"YOUR_DEMO_PROVIDER +exports.provider = "google"; +exports.csyncDemoToken = 'demoToken';//"YOUR_CSYNC_DEMO__TOKEN_HERE"; +exports.googleClientId = "YOUR_GOOGLE_CLIENT_ID_HERE"; + diff --git a/vertx/public/dataviewer/css/dashboard.css b/vertx/public/dataviewer/css/dashboard.css new file mode 100644 index 0000000..947e9cb --- /dev/null +++ b/vertx/public/dataviewer/css/dashboard.css @@ -0,0 +1,237 @@ +body { + font-size: 18px; + font-family: Helvetica, Arial, sans-serif; + background-color: #f1f1f1; + width: 1000px; + margin: 0; + padding: 0; +} + +button { + background-color: #142936; + border: none; + color: white; + width: 230px; + padding: 10px 30px; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 15px; + margin: 5px 0px; + cursor: pointer; +} + +button.text-button { + margin-top: 20px; + margin-left: auto; + margin-right: auto; + display: block; + text-decoration: none; + width: 150px; + background-color: white; + color: #9B9B9B; + font-size: 14px; + outline: 0 none; + border: none; +} + +input { + font-size: 15px; + padding: 10px 20px; + border-top: 0; + border-left: 0; + border-right: 0; + outline-width: 0; +} + +button a:link{ + padding: 0em; + text-decoration: none; +} + +button a{ + color: white; +} + +a { + color: white; + cursor: pointer; +} + +.bold { + font-weight: bold; +} + +.flex-container { + display: flex; + flex-direction: column; + -webkit-flex-direction: column; + flex-wrap: nowrap; + justify-content: flex-start; + align-items: stretch; + align-content: stretch; + width: 100%; + height: 100%; +} + +.flex-container-nav { + position: fixed; + width: 100%; + min-height: 100px; + color: white; + -webkit-box-shadow: 0 8px 6px 
-6px #999; + -moz-box-shadow: 0 8px 6px -6px #999; + box-shadow: 0 8px 6px -6px #999; + display: -webkit-flex; + display: flex; + flex-direction: row; + background: #5294e9; + align-self: stretch; + -webkit-flex-flow: row wrap; +} + +.flex-container-nav a { + text-decoration: none; + display: block; +} + +.nav-bar { + display: flex; + color: #FFFFFF; +} + +.nav-bar-left { + justify-content: flex-start; + align-items: baseline; +} + +.nav-bar-left div { + transform: translateY(-25%); +} + +.nav-bar-left img { + padding: 1em; + transform: translateY(42%); +} + +.logo { + display: block; + padding: 2.1em; + background-color: #142936; +} +.my-instances { + padding: 0.7em; + font-size: 24px; + letter-spacing: 1px; +} + +.user { + position: absolute; + right: 35px; + padding: 1em; + font-size: 16px; + letter-spacing: 3px; + text-transform: uppercase; + transform: translateY(50%); +} +a:hover{ + color: gray; +} + +.flex-instances { + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: flex-start; + align-content: space-between; + align-items: baseline; + margin-top: 150px; +} + +.template-container{ + order: 0; + flex: 0 1 auto; + align-self: center; + flex-wrap: wrap; + position: relative; + z-index: 0; +} + + +.template { + order: 0; + flex: 0 1 auto; + align-self: center; + width: 180px; + height: 150px; + padding: 2em 4em 7em 4em; + margin: 1em; + background-color: white; +} + +.instance-template { + width: 200px; + height: 150px; +} + +.rename-screen { + position: absolute; +} + +.new-instance-template { + flex-direction: column; + align-items: center; + align-content: stretch; + display: flex; +} + +.instance-url { + font-size: 14px; + line-height: 1.5; + letter-spacing: 1px; + color: #222222; +} + +.csync-port { + font-size: 14px; + line-height: 65px; + letter-spacing: 1px; + color: #000000; +} + +.instance-inputs { + display: flex; + flex-direction: column; +} + +.new-instance-name, +.new-name { + margin-bottom: 110px; +} + +.instance-name 
{ + order: 0; + flex: 1; + overflow: hidden; + color: #5294e9; + font-size: 24px; + letter-spacing: 1px; + text-overflow: ellipsis; + white-space: nowrap; +} +.dash { + color: #3e76c0; + font-size: 30px; + font-weight: 100; +} + +#cancel-button { + margin-left: auto; + margin-right: auto; + display: block; + text-decoration: none; +} + +#plus-icon { + cursor:pointer; +} \ No newline at end of file diff --git a/vertx/public/dataviewer/css/style.css b/vertx/public/dataviewer/css/style.css new file mode 100644 index 0000000..952e9ae --- /dev/null +++ b/vertx/public/dataviewer/css/style.css @@ -0,0 +1,151 @@ +body { + font-size: 18px; + font-family: SanFrancisco; + text-align: center; + color: darkgray; + margin: 0; + padding: 0; +} + +button{ + background-color: #9B9B9B; + border: none; + color: white; + padding: 10px 30px; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 16px; + margin: 10px 10px; + cursor: pointer; +} + +button a:link{ + padding: 0em; + text-decoration: none; +} + +button a{ + color: white; +} + +a { + color: darkgray; +} + +.flex-container { + display: flex; + flex-direction: column; + -webkit-flex-direction: column; + flex-wrap: nowrap; + justify-content: flex-start; + align-items: stretch; + align-content: stretch; + width: 100%; + height: 100%; +} + +.flex-container-nav { + position: fixed; + width: 100%; + -webkit-box-shadow: 0 8px 6px -6px #999; + -moz-box-shadow: 0 8px 6px -6px #999; + box-shadow: 0 8px 6px -6px #999; + display: -webkit-flex; + display: flex; + flex-direction: row; + background: #E7E7E7; + align-self: stretch; + -webkit-flex-flow: row wrap; + justify-content: space-between; +} + +.flex-container-nav a { + text-decoration: none; + display: block; + padding: 1em; +} + +.nav-bar { + display: flex; +} + +.logo { + display: block; + margin-left: 15px; + padding: 15px; + color: darkgray; +} + +.flex{ + padding-top: 20px; + display: flex; + flex-direction: column; + font-size: 20px; + align-self: 
stretch; + min-height: 300px; + background-color: white; +} + +.flex-2, .flex-4{ + background-color: #E7E7E7; +} + +.flex-1 { + padding-top: 100px; +} + +.flex-3 { + display: flex; + flex-direction: row; + align-self: flex-end; + align-items: center; + margin-right: -55px; +} + +.footer { + padding-right: 10px; + display: flex; + flex-direction: row; + background-color: gray; + height: 50px; + align-items: center; + align-self: stretch; + -webkit-flex-flow: row wrap; + justify-content: space-between; + position: relative; +} + +.footer a{ + text-decoration: none; + display: block; + padding: 0.3em; +} + +a:hover{ + color: white; +} + +.feature-columns { + display: flex; + flex-direction: row; + justify-content: center; + padding: 1em; + +} + +h5 { + padding-left: 5em; + padding-right: 5em; +} + +div.col{ + width: 300px; + padding: 0; + padding-top: 1em; + margin-left: 4em; + margin-right: 4em; + margin-bottom: 4em; + background-color: white; + display: block; +} \ No newline at end of file diff --git a/vertx/public/dataviewer/gulpfile.js b/vertx/public/dataviewer/gulpfile.js new file mode 100644 index 0000000..8751dfe --- /dev/null +++ b/vertx/public/dataviewer/gulpfile.js @@ -0,0 +1,51 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var gulp = require('gulp'); +var browserify = require('browserify'); +var source = require('vinyl-source-stream') +var streamify = require('gulp-streamify') +var concat = require('gulp-concat'); +var minifyCSS = require('gulp-minify-css'); +var uglify = require('gulp-uglify'); + +gulp.task('default', function(){ + gulp.start('build'); +}); + +gulp.task('package', function(){ + return browserify({entries: './src/dataModel.js', debug: true}) + .bundle() + .pipe(source('bundle.js')) + .pipe(streamify(uglify())) + .pipe(gulp.dest('./ui')); +}); + +gulp.task('css', function(){ + return gulp.src(['./node_modules/jstree/dist/themes/default/style.min.css', './ui/style.css']) + .pipe(minifyCSS()) + .pipe(concat('bundle.css')) + .pipe(gulp.dest('./ui')); +}); + +gulp.task('assets', function(){ + return gulp.src(['./node_modules/jstree/dist/themes/default/*.png', './node_modules/jstree/dist/themes/default/*.gif']) + .pipe(gulp.dest('./ui')); +}); + +gulp.task('build', ['package', 'css', 'assets'], function() { + process.exit(0); +}); diff --git a/vertx/public/dataviewer/index.html b/vertx/public/dataviewer/index.html new file mode 100644 index 0000000..5841953 --- /dev/null +++ b/vertx/public/dataviewer/index.html @@ -0,0 +1,68 @@ + + + + + + Csync Dataviewer + + + + + +
+

System Status

+
+
+

+
+ +
+
+ + + + \ No newline at end of file diff --git a/vertx/public/dataviewer/package.json b/vertx/public/dataviewer/package.json new file mode 100644 index 0000000..3c05d6f --- /dev/null +++ b/vertx/public/dataviewer/package.json @@ -0,0 +1,27 @@ +{ + "name": "CSyncDataViewer", + "version": "0.0.1", + "scripts": { + "dev-start": "gulp && nodemon app.js" + }, + "dependencies": { + "csync": "1.0.1", + "gulp": "~3.9.1", + "gulp-concat": "~2.6.0", + "gulp-minify-css": "~1.2.4", + "jquery": "~3.1.1", + "jstree": "~3.3.2", + "knockout": "~3.4.0", + "mustache": "^2.2.1", + "split.js": "~1.0.7", + "webworkify": "~1.4.0" + }, + "devDependencies": { + "browserify": "~13.1.0", + "grunt": "~0.4.1", + "gulp-cli": "1.2.2", + "gulp-streamify": "^1.0.2", + "gulp-uglify": "^2.0.0", + "vinyl-source-stream": "~1.1.0" + } +} diff --git a/vertx/public/dataviewer/src/PropertyViewModel.js b/vertx/public/dataviewer/src/PropertyViewModel.js new file mode 100644 index 0000000..e81e7a5 --- /dev/null +++ b/vertx/public/dataviewer/src/PropertyViewModel.js @@ -0,0 +1,177 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var ko = require('knockout'); +var $ = require('jquery'); +//UI Elements +var treeElem = $(".content"); +var editButton = $("#editButton"); +var saveButtonElem = $("#saveButton"); +var cancelButton = $("#cancelButton"); +var noDataText = $("#noDataText"); +var noACL = $("#noACL"); +var acl = $("#acl"); +var deleteButtonElem = $("#deleteButton"); +var header = document.getElementById("header-div"); +var treeDiv = document.getElementById("jstree"); +var leftPanel = document.getElementById("left-panel"); +var middlePanel = document.getElementById("middle"); +var selectedNode = {}; + +module.exports = function(tree, shouter, worker) { + var currVal = ""; + var propertiesElem = $('#node_properties'); + var self = this; + + //table ovservable + self.properties = ko.observableArray(); + self.editBox = ko.observable({}); + + //subscribe to tree's selected node + tree.selectedNode.subscribe(function (node) { + self.setInfo(node); + self.initKeyValue(node.data); + }); + + //publish to Tree delete node + self.deleteNode = function () { + var deleteObj = { + key: tree.selectedNode().id, + text: tree.selectedNode().text, + type: "delete_node" + } + worker.postMessage(deleteObj); + } + + //publish to Tree add node + self.addNodeButton = function (data) { + data.status = "valid"; + shouter.notifySubscribers(data, "addNode"); + $("#addNode").prop("disabled", true); + } + + self.editButton = function () { + var input = $("#editBox"); + treeElem.css({ 'color': '#3d3d3d' }); + var buttonName = editButton.text(); + currVal = input.val(); + editButton.hide(); + saveButtonElem.show(); + saveButtonElem.css("display", "flex"); + cancelButton.show(); + propertiesElem.hide(); + input.show(); + deleteButtonElem.prop("disabled", true); + $("#addNode").prop("disabled", true); + toggleDivs("none"); + } + + self.saveButton = function () { + toggleDivs("auto"); + var input = $("#editBox"); + treeElem.css({ 'color': '#3d3d3d' }); + editButton.prop("disabled", false); + 
$("#addNode").prop("disabled", false); + var data = {}; + saveButtonElem.hide(); + cancelButton.hide(); + editButton.show(); + data = input.val(); + propertiesElem.show(); + $('.dataValueInput').hide(); + input.hide(); + // write new info to csync + var writeObj = { + key: tree.selectedNode().id, + type: "update_data", + text: tree.selectedNode().text, + parent: tree.selectedNode().parent, + data: JSON.stringify(data) + }; + // send worker a write task + worker.postMessage(writeObj); + } + + self.cancelButton = function () { + //cancel any changes + var input = $("#editBox"); + saveButtonElem.hide(); + cancelButton.hide(); + editButton.show(); + treeElem.css({ 'color': '#3d3d3d' }); + propertiesElem.show(); + input.val = currVal; + input.hide(); + self.editBox(currVal); + deleteButtonElem.prop("disabled", false); + $("#addNode").prop("disabled", false); + toggleDivs("auto"); + } + + self.initKeyValue = function (data) { + self.editBox(JSON.stringify(data, null, 2)); + self.properties.removeAll(); + noDataText.hide(); + + if (data === null || (typeof data === "object" && Object.keys(data).length === 0)){ + noDataText.show(); + return; + } + if(Object.keys(data).length === 0){ + self.properties.push(new PropertyModel(tree.selectedNode().text, data)); + return; + } + for (var key in data) { + if (key === "0") { + self.properties.push(new PropertyModel(tree.selectedNode().text, data)); + return; + } + self.properties.push(new PropertyModel(key, data[key])); + } + self.properties.sort(); + } + + self.setInfo = function(node){ + if(node.original.acl !== undefined){ + noACL.hide(); + acl.text("ACL: " + node.original.acl); + acl.show(); + } + } + + function toggleDivs(command){ + //reset the unclickable elements + if(command === "auto"){ + middlePanel.removeEventListener("click", alertUser, false); + } + else { + middlePanel.addEventListener("click", alertUser, false); + } + treeDiv.style.pointerEvents = command; + leftPanel.style.pointerEvents = command; + 
header.style.pointerEvents = command; + } + + function alertUser() { + treeElem.css({ 'color': '#bdbdbd' }); + alert("Please finish editing. Click Cancel or Save to continue!"); + } +} +//data model for the properties table +function PropertyModel(key, value) { + self.keyValue = key; + self.dataValue = JSON.stringify(value, null, 2); +} \ No newline at end of file diff --git a/vertx/public/dataviewer/src/TreeModel.js b/vertx/public/dataviewer/src/TreeModel.js new file mode 100644 index 0000000..e501ee0 --- /dev/null +++ b/vertx/public/dataviewer/src/TreeModel.js @@ -0,0 +1,184 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var ko = require('knockout'); +var $ = require('jquery'); +var jstreeState = localStorage.getItem('jstree'); +var tree = $('#jstree'); + +module.exports = function(shouter, worker) { + this.selectedNode = ko.observable({}); + + //subscribe to addNode + shouter.subscribe(function (data) { + data.data = {}; + var id; + if(Object.keys(this.selectedNode()).length === 0){ + id = tree.jstree("create_node", "#", data, "last"); + } + else{ + id = tree.jstree("create_node", this.selectedNode(), data, "last"); + } + tree.jstree('open_node', this.selectedNode()); + tree.jstree(true).edit(id, "Node Name"); + }, this, "addNode"); + + //subscribe to rename binding (jstree) + shouter.subscribe(function (node) { + var csyncKey; + var newId; + if(node.parent === "#"){ + newId = node.text; + } + else{ + newId = node.parent + "." + node.text; + } + tree.jstree().set_id(node, newId); + //ensure sorted after add + var parentNode = tree.jstree().get_node(node.parent); + var position = getPosition(parentNode.children || [], node.id); + tree.jstree().move_node(node, parentNode, position); + tree.jstree().deselect_all(true); + tree.jstree().select_node(newId); + var writeObj = { + key: this.selectedNode().id, + type: "new_node", + text: node.text, + parent: node.parent + }; + // send worker a write task + worker.postMessage(writeObj); + }, this, "afterRename"); + + this.deleteNode = function(incomingData){ + //set selected node to parent + var currNode = tree.jstree().get_node(incomingData.key); + tree.jstree().delete_node(currNode); + if(incomingData.key === this.selectedNode().id){ + var parentID = this.selectedNode().parent; + var parentNode = tree.jstree().get_node(parentID); + tree.jstree().deselect_all(true); + tree.jstree().select_node(parentNode); + deleteNonExsistentParents(parentNode); + } + } + + //update data of existing node + this.updateNodeData = function(incomingData){ + //TODO csync writes parent node with undefined data + if(incomingData.data === undefined){ + 
return; + } + var node = tree.jstree().get_node(incomingData.key); + tree.jstree().get_node(incomingData.key).data = incomingData.data; + tree.jstree().get_node(incomingData.key).original.status = "valid"; + if (tree.jstree().get_selected()[0] === incomingData.key) { + //publish to update Properties + this.selectedNode(node); + } + } + + this.createAndAddNode = function(incomingData){ + var position = -1; + var keyArray = (incomingData.key).split("."); + //check if parents exist and create if not + for (var i = 0; i < keyArray.length; i++) { + var node = {}; + node.id = keyArray.slice(0, i+1).join("."); + node.status = "invalid"; + node.acl = incomingData.acl; + var curNode = tree.jstree().get_node(node.id); + if (!curNode){ + node.text = keyArray[i]; + node.parent = (keyArray.slice(0, i)).join("."); + node.data = {}; + if(i === keyArray.length-1){ + node.status = "valid"; + node.data = incomingData.data; + } + if(i === 0){ + node.parent = "#"; + position = getPosition(getRootNodes(), node.id); + tree.jstree().create_node(node.parent, node, position); + } + else { + var parentNode = tree.jstree().get_node(node.parent); + position = getPosition(parentNode.children || [], node.id); + tree.jstree().create_node(parentNode, node, position); + } + restoreState(node); + } + } + } + + function deleteNonExsistentParents(node){ + console.log("NODE: ", node); + var tempNode = node; + while(tempNode.id != "#"){ + var parent = tree.jstree().get_node(tempNode.parent); + if(tempNode.original.status != "valid"){ + tree.jstree().delete_node(tempNode); + tempNode = parent; + tree.jstree().deselect_all(true); + tree.jstree().select_node(tempNode); + } + else { + return; + } + } + } + + function getPosition(siblings, nodeId){ + var lo = 0; + var hi = siblings.length - 1; + while(lo <= hi ){ + var mid = (lo + (hi - lo) / 2) >> 0; + if (nodeId < siblings[mid]) hi = mid - 1; + else if (nodeId > siblings[mid]) lo = mid + 1; + else return mid; + } + return lo; + } + + function 
getRootNodes(){ + var siblings = tree.jstree().get_children_dom("#"); + var nodes = []; + for(var i=0; i h2 { + padding-left: 20px; + font-size: 24px; + font-weight: 100; + line-height: 1.5; + letter-spacing: 1px; + color: #ffffff; +} + +.flex-container-tree-properties { + display: flex; + flex-direction: row; + flex-wrap: row; + justify-content: flex-start; + align-items: flex-start; + align-content: stretch; + width: 100%; + height: 100%; +} + +.flex-item { + padding-left: 20px; +} + +.content { + border-top: 1px solid gray; + order: 0 flex: 0 1 auto; + align-self: auto; + width: 70%; + color: #3d3d3d; + height: 90vh; + overflow-y: auto; +} + +.sidebar-right { + border-top: 1px solid gray; + border-left: 1px solid gray; + word-wrap: break-all; + height: 100vh; + width: 40%; + overflow-x: scroll; + overflow-y: scroll; + order: 0; + flex: 0 1 auto; + align-self: auto; + background-color: #f6f6f6; + display: -webkit-flex; + display: flex; + -webkit-flex-direction: column; + flex-direction: column; + padding-right: 25px; +} + +.form { + order: 0 flex: 0 1 auto; + align-self: auto; + overflow: auto; + display: -webkit-flex; + display: flex; + -webkit-flex-direction: column; + flex-direction: column; + align-self: left; + align-items: stretch; + justify-content: space-between; +} + +.footer { + background: white; +} + +.noDataText { + vertical-align: middle; + display: inline-block; + padding: 10px; +} + +body { + font-size: 16px; + margin: 0; + font-family: Helvetica, Arial, sans-serif; +} + + +/*Vertical Nav bar Test*/ + +.nav-bar { + display: flex; + background-color: #1c364a; +} + +.nav-bar a { + display: block; + width: 100%; + color: #879298; + padding: 8px 0 8px 10px; + text-decoration: none; +} + +.nav-bar div.active { + border-left: 3px solid #FFF; +} + +.nav-bar div.active a { + color: #FFF +} + +.nav-bar div:hover:not(.active) { + border-left: 3px solid white; + color: white; +} + +.tree-container { + float: left; + width: 60%; + height: 1000px; +} + 
+.side-nav div { + background: green; + width: 10px; +} + +.jstree-default .jstree-anchor { + padding-left: 4px; +} + +.jstree-default .jstree-clicked { + background: #5294e9; + color: #FFF; +} + +.jstree-default .jstree-hovered { + background: #5294e9; + color: #FFF; +} + +.container-edit { + justify-content: space-between; + align-content: flex-start; + display: flex; + flex-direction: row; +} + +.container-edit h2 { + font-size: 24px; + font-weight: 500; + letter-spacing: 1px; + line-height: 36px; + color: #222222; +} + +.container-button { + display: flex; + flex-direction: row; + padding-bottom: 40px; +} + +button { + font-size: 14px; + outline: 0 none; + padding-top: 22px; + background-color: #f6f6f6; + border: none; + text-decoration: underline; +} + +input textarea { + border: 1px solid #DADADA; + color: #888; + outline: 0 none; + padding: 3px 3px 3px 5px; + font-size: 14px; + line-height: 15px; + box-shadow: inset 0px 1px 4px #ECECEC; + -moz-box-shadow: inset 0px 1px 4px #ECECEC; + -webkit-box-shadow: inset 0px 1px 4px #ECECEC; +} + +textarea { + height: 235px; + max-height: 450px; + resize: none; + font-family: 'Courier New', Courier, 'Lucida Sans Typewriter', 'Lucida Typewriter', monospace; +} + +#editBox { + width: 100%; + font-size: 13px; + color: #3d3d3d; +} + +pre { + font-size: 12px; + word-wrap: break-word; + white-space: pre-wrap; +} + +label { + word-wrap: break-all; +} + +.csync-info { + padding-bottom: 15px; + padding-top: 15px; + display: flex; + font-size: 20px; +} + +.csync-info div { + padding-left: 10px; +} + +#notif-bar { + margin-left: 30%; + position: absolute; + line-height: 2; + width: 50%; + background-color: #62D295; + text-align: center; + font-size: 12px; + overflow: hidden; + -webkit-box-shadow: 0 0 5px black; + -moz-box-shadow: 0 0 5px black; + box-shadow: 0 0 5px black; +} + +.gutter { + width: 5px; + background-color: #e5e5e5; + height: 100vh; + border-left: 1px solid gray; + border-top: 1px solid gray; +} + 
+.flex-container-tree-properties:hover .gutter { + width: 5px; + cursor: col-resize; + cursor: -moz-col-resize; + cursor: -webkit-col-resize; +} + +.host_port { + letter-spacing: 1px; + font-size: 14px; + color: #222222; +} + +.host_port_name { + font-size: 14px; + letter-spacing: 1px; + color: #9b9b9b; +} + +.acl-info h3 { + font-size: 16px; + font-weight: 500; + letter-spacing: 3px; + color: #5294e9; +} + +.signin { + color: #5294e9; + font-size: 24px; + font-family: Helvetica, Arial, sans-serif; + align-self: center; +} + +.acl-info div { + letter-spacing: 1px; + line-height: 24px; +} + +.save-button { + background: #142936; + margin-top: 25px; + color: #FFF; + height: 50px; + align-items: center; + justify-content: center; + width: 75%; + margin-left: auto; + margin-right: auto; + display: none; + font-size: 16px; + letter-spacing: 3px; + cursor: pointer; +} + +.cancel-button { + display: none; + margin-left: auto; + margin-right: auto; + color: #9b9b9b; + letter-spacing: 2px; + font-size: 12px; + cursor: pointer; +} + +#deleteButton { + cursor: pointer; +} + +.nodeManipulation img { + vertical-align: bottom; + margin-right: 10px; + cursor: pointer; +} + +#addNode { + margin-left: 25px; + cursor: pointer; +} + +#left-panel div { + padding-top: 5px; + padding-bottom: 5px; + margin-bottom: 5px; +} + +#left-panel div img { + margin-right: 19px; +} + +#dataViewer { + margin-top: 15px; +} + +.modal { + display: block; /* Hidden by default */ + position: fixed; /* Stay in place */ + z-index: 1; /* Sit on top */ + left: 0; + top: 0; + width: 100%; /* Full width */ + height: 100%; /* Full height */ + overflow: auto; /* Enable scroll if needed */ + background-color: rgb(0,0,0); /* Fallback color */ + background-color: rgba(0,0,0,0.4); /* Black w/ opacity */ +} + +.modal-content { + display: flex; + flex-direction: column; + background-color: #fefefe; + margin: 15% auto; /* 15% from the top and centered */ + padding: 20px; + border: 1px solid #888; + max-width: 288px; +} 
+ +.providers { + padding-top: 30px; + margin-bottom: 10px; + align-self: center; +} + +.guest-login { + -moz-box-shadow:inset 0px 1px 0px 0px #ffffff; + -webkit-box-shadow:inset 0px 1px 0px 0px #ffffff; + box-shadow:inset 0px 1px 0px 0px #ffffff; + background:-webkit-gradient(linear, left top, left bottom, color-stop(0.05, #ffffff), color-stop(1, #f6f6f6)); + background:-moz-linear-gradient(top, #ffffff 5%, #f6f6f6 100%); + background:-webkit-linear-gradient(top, #ffffff 5%, #f6f6f6 100%); + background:-o-linear-gradient(top, #ffffff 5%, #f6f6f6 100%); + background:-ms-linear-gradient(top, #ffffff 5%, #f6f6f6 100%); + background:linear-gradient(to bottom, #ffffff 5%, #f6f6f6 100%); + filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#f6f6f6',GradientType=0); + background-color:#ffffff; + -moz-border-radius:6px; + -webkit-border-radius:6px; + border-radius:6px; + border:1px solid #dcdcdc; + display:inline-block; + cursor:pointer; + color:#666666; + font-family:Arial; + font-size:15px; + font-weight:bold; + padding:6px 24px; + text-decoration:none; + text-shadow:0px 1px 0px #ffffff; + height: 36px; + width: 200px; + margin-top: 10px; +} +.guest-login:hover { + background:-webkit-gradient(linear, left top, left bottom, color-stop(0.05, #f6f6f6), color-stop(1, #ffffff)); + background:-moz-linear-gradient(top, #f6f6f6 5%, #ffffff 100%); + background:-webkit-linear-gradient(top, #f6f6f6 5%, #ffffff 100%); + background:-o-linear-gradient(top, #f6f6f6 5%, #ffffff 100%); + background:-ms-linear-gradient(top, #f6f6f6 5%, #ffffff 100%); + background:linear-gradient(to bottom, #f6f6f6 5%, #ffffff 100%); + filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#f6f6f6', endColorstr='#ffffff',GradientType=0); + background-color:#f6f6f6; +} +.guest-login:active { + position:relative; + top:1px; +} diff --git a/vertx/src/main/resources/simplelogger.properties b/vertx/src/main/resources/simplelogger.properties new file mode 100644 
index 0000000..6e68fb9 --- /dev/null +++ b/vertx/src/main/resources/simplelogger.properties @@ -0,0 +1,30 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +org.slf4j.simpleLogger.defaultLogLevel=info + + +org.slf4j.simpleLogger.log.com.ibm.csync.commands.SqlStatement$=debug +org.slf4j.simpleLogger.log.main$=debug +org.slf4j.simpleLogger.log.com.ibm.csync.session.Session=debug +org.slf4j.simpleLogger.log.com.ibm.csync.database=debug + +org.slf4j.simpleLogger.showDateTime=false +#org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS Z + +org.slf4j.simpleLogger.showThreadName=true +#org.slf4j.simpleLogger.showLogName=true +org.slf4j.simpleLogger.showShortLogName=false \ No newline at end of file diff --git a/vertx/src/main/scala/com/ibm/csync/vertx/Envelope.scala b/vertx/src/main/scala/com/ibm/csync/vertx/Envelope.scala new file mode 100644 index 0000000..83a5e85 --- /dev/null +++ b/vertx/src/main/scala/com/ibm/csync/vertx/Envelope.scala @@ -0,0 +1,69 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.vertx + +import com.ibm.csync.commands +import com.ibm.csync.commands._ +import org.json4s.{Extraction, JValue, NoTypeHints} +import org.json4s.native.Serialization + +class Formats { + implicit protected val format = Serialization.formats(NoTypeHints) +} + +case class ResponseEnvelope(kind: String, closure: Option[JValue], payload: JValue, + version: Int = commands.MESSAGE_VERSION) extends Formats { + lazy val asString: String = Serialization.write(this) +} + +object ResponseEnvelope extends Formats { + def apply(closure: Option[JValue], msg: Response): ResponseEnvelope = { + val asJson = Extraction.decompose(msg) + ResponseEnvelope(msg.kind, closure, asJson) + } +} + +case class RequestEnvelope(version: Option[Int], kind: String, + closure: Option[JValue], payload: JValue) extends Formats { + + def check(): RequestEnvelope = { + version match { + case None => + throw new Exception("missing version") + case Some(x) => + if (x != commands.MESSAGE_VERSION) { + throw new Exception(s"bad version $x != ${commands.MESSAGE_VERSION}") + } + } + this + } + + lazy val asRequest: Command = kind match { + case "pub" => Extraction.extract[Pub](payload) + case "advance" => Extraction.extract[Advance](payload) + case "sub" => Extraction.extract[Sub](payload) + case "unsub" => Extraction.extract[Unsub](payload) + case "fetch" => Extraction.extract[Fetch](payload) + case _ => throw new Exception(s"unknown kind $kind") + } +} + +object RequestEnvelope extends Formats { + + def apply(s: String): RequestEnvelope = 
Serialization.read[RequestEnvelope](s).check() + +} \ No newline at end of file diff --git a/vertx/src/main/scala/com/ibm/csync/vertx/Main.scala b/vertx/src/main/scala/com/ibm/csync/vertx/Main.scala new file mode 100644 index 0000000..ba4a8cb --- /dev/null +++ b/vertx/src/main/scala/com/ibm/csync/vertx/Main.scala @@ -0,0 +1,236 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.vertx + +import javax.sql.DataSource + +import com.ibm.csync.commands.{Happy, Response} +import com.ibm.csync.database.Database +import com.ibm.csync.rabbitmq.{Constants, Factory} +import com.ibm.csync.session.Session +import com.ibm.csync.types.{ClientError, SessionId, Token} +import com.rabbitmq.client.Connection +import com.typesafe.scalalogging.LazyLogging +import com.zaxxer.hikari.{HikariConfig, HikariDataSource} +import io.vertx.core._ +import io.vertx.core.http._ +import org.json4s.JValue +import org.postgresql.ds.PGSimpleDataSource + +import scala.concurrent.{ExecutionContext, Future, Promise} + +object Main extends LazyLogging { + + def promiseHandler[T](p: Promise[T]): Handler[AsyncResult[T]] = + e => if (e.succeeded()) p.success(e.result()) else p.failure(e.cause()) + + implicit val currentThreadExecutionContext = new ExecutionContext { + + override def reportFailure(cause: Throwable): Unit = { + logger.error("ec", cause) + } + + override def execute(runnable: Runnable): Unit = { + runnable.run() + } 
+ } + + var CSyncUUID: String = "" + + def sendResponse(ctx: VertxContext, closure: Option[JValue], res: Response, ws: ServerWebSocket): Future[_] = + closure match { + case Some(_) => + val s = ResponseEnvelope(closure, res).asString + logger.debug(s"sending reply $s") + ctx.runEventLoop { + ws.writeFinalTextFrame(s) + } + case None => Future.successful(()) + } + + def handleFrame(ctx: VertxContext, frame: WebSocketFrame, state: SessionState.Ref, ws: ServerWebSocket): Unit = + state.value.onFrame(state, frame) { (session, msg) => + logger.debug(s"request for ${session.sessionId}") + Future { + RequestEnvelope(msg) + } flatMap { env => + ctx.runBlocking { + env.asRequest.doit(session) + } recover { + case ClientError(code, err) => + Happy(code.id, err.getOrElse(code.name)) + } flatMap { r => + sendResponse(ctx, env.closure, r, ws) + } + } recoverWith { + case e => + logger.error("", e) + state.value.close(state) + } recover { + case e => + logger.error("", e) + } + } + + // Called once per web socket connect, runs for its side effects + def handleConnect(ctx: VertxContext, request: HttpServerRequest, ds: DataSource, + rabbitConnection: Connection): Future[_] = { + val ws = request.upgrade() + ws.pause // until we know what to do with incoming messages + val state = new SessionState.Ref(new SessionState.HasSocket(ctx, ws)) + logger.info(s"WebSocket connection ${ws.uri()}") + + ws.closeHandler { _ => + state.value.closeHandler(state).recover { + case e => + logger.error("close", e) + } + } + + ws.frameHandler { frame => + handleFrame(ctx, frame, state, ws) + } + + ctx.runBlocking { + val provider = Option(request.getParam("authProvider")) + val token = Token(request.getParam("token")) + val sessionId = SessionId(request.getParam("sessionId")) + Session(ds, CSyncUUID, rabbitConnection, provider, token, sessionId) { outgoing => + Future { + ResponseEnvelope(None, outgoing).asString + } flatMap { js => + logger.debug(s"sending back $js") + ctx.runEventLoop { 
ws.writeFinalTextFrame(js) } + // TODO: close socket + } recover { + case e => + logger.error(s"write to $sessionId failed", e) + } + } + } flatMap { session => + state.value.setSession(state, session) + } flatMap { _ => + ctx.runEventLoop { ws.resume } + } recoverWith { + case e => + logger.error("", e) + state.value.close(state) + } recover { + case e => + logger.error("", e) + } + } + + private def initPostgres: DataSource = { + // Postgres settings + val pgt = new PGSimpleDataSource() + if (sys.env.get("CSYNC_DB_STRING").isEmpty) { + pgt.setServerName("localhost") + } else { + pgt.setUrl(sys.env("CSYNC_DB_STRING")) + } + + val hikariConfig = new HikariConfig() + hikariConfig.setDataSource(pgt) + hikariConfig.setAutoCommit(false) + new HikariDataSource(hikariConfig) + } + + private def initRabbit: Connection = { + // RabbitMQ settings + val rabbitUri = sys.env.getOrElse("CSYNC_RABBITMQ_URI", "amqp://guest:guest@localhost:5672/csync") + + val rabbitFactory = Factory(rabbitUri) + + rabbitFactory.newConnection + } + + def main(args: Array[String]) { + System.setProperty( + "vertx.logger-delegate-factory-class-name", + classOf[io.vertx.core.logging.SLF4JLogDelegateFactory].getName + ) + + val vertx = Vertx.vertx() + val ds = initPostgres + val rabbitConnection = initRabbit + + val port = sys.env.getOrElse("CSYNC_PORT", "6005") + val serverOptions = new HttpServerOptions().setPort(port.toInt) + + def loop(n: Int): Future[_] = + if (n <= 0) { + Future.successful(()) + } else { + deploy(vertx, ds, rabbitConnection, serverOptions).flatMap { s => + logger.info(s"# $n listening on port ${s.actualPort()}") + loop(n - 1) + } + } + + // TODO: magic number + val nThreads = 10 + + Future { + CSyncUUID = Database.createTables(ds) + } flatMap { _ => + loop(nThreads) + } recover { + case e => + logger.error("initialization error", e) + vertx.close() + } recover { + case e => + logger.error("", e) + } + } + + private def deploy(vertx: Vertx, ds: DataSource, rabbitConnection: 
Connection, + serverOptions: HttpServerOptions): Future[HttpServer] = { + val out = Promise[HttpServer] + + vertx.deployVerticle(new AbstractVerticle { + override def start(): Unit = { + val ctx = VertxContext(vertx.getOrCreateContext()) + val server = vertx.createHttpServer(serverOptions) + + server.requestHandler { request => + def send(f: String) = { + val p = Promise[Void] + request.response.sendFile(s"public/dataviewer/$f", promiseHandler(p)) + p.future + } + + (request.path() match { + case "/connect" => handleConnect(ctx, request, ds, rabbitConnection) + case "/" => send("index.html") + // TODO: think about security + case x if x.contains("..") => send("") + case x => send(x) + }) recover { + case e => + logger.error("request", e) + } + } + + server.listen(promiseHandler(out)) + } + }) + + out.future + } +} diff --git a/vertx/src/main/scala/com/ibm/csync/vertx/SessionState.scala b/vertx/src/main/scala/com/ibm/csync/vertx/SessionState.scala new file mode 100644 index 0000000..173b9ea --- /dev/null +++ b/vertx/src/main/scala/com/ibm/csync/vertx/SessionState.scala @@ -0,0 +1,127 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ibm.csync.vertx + +import com.ibm.csync.session.Session +import com.typesafe.scalalogging.LazyLogging +import io.vertx.core.http.{ServerWebSocket, WebSocketFrame} + +import scala.concurrent.{ExecutionContext, Future} + +trait SessionState extends LazyLogging { + def closeHandler(ref: SessionState.Ref): Future[_] + def onFrame(ref: SessionState.Ref, frame: WebSocketFrame)(cb: (Session, String) => Any) + def close(ref: SessionState.Ref): Future[_] + def setSession(ref: SessionState.Ref, session: Session): Future[_] +} + +object SessionState extends LazyLogging { + + class Ref(init: SessionState) { + var value: SessionState = init + } + + /////////////////////////// + // No session, no socket // + /////////////////////////// + + object HasNothing extends SessionState { + override def closeHandler(ref: Ref): Future[_] = Future.successful(()) + + override def onFrame(ref: Ref, frame: WebSocketFrame)(cb: (Session, String) => Any): Unit = { + logger.error(s"dropping frame") + } + + override def close(ref: Ref): Future[_] = + Future.successful(()) + + // TODO: just report + override def setSession(ref: Ref, session: Session): Future[_] = + Future.failed(new IllegalStateException("session not expected")) + } + + //////////////////////// + // No session, socket // + //////////////////////// + + class HasSocket(ctx: VertxContext, val ws: ServerWebSocket) extends SessionState { + override def closeHandler(ref: Ref): Future[_] = ctx.runEventLoop { + ref.value = HasNothing + } + + override def onFrame(ref: Ref, frame: WebSocketFrame)(cb: (Session, String) => Any): Unit = { + logger.debug(s"dropping frame") + } + + override def close(ref: Ref): Future[_] = + ctx.runEventLoop { + ref.value = HasNothing + ws.close() + } + + override def setSession(ref: Ref, session: Session): Future[_] = ctx.runEventLoop { + ref.value = new HasSession(ctx, ws, session) + } + } + + //////////////////////// + // Session and socket // + //////////////////////// + + class 
HasSession(ctx: VertxContext, ws: ServerWebSocket, session: Session) extends SessionState { + var message = new StringBuilder + + implicit val ec: ExecutionContext = Main.currentThreadExecutionContext + + override def closeHandler(ref: Ref): Future[_] = + ctx.runEventLoop { + ref.value = HasNothing + } flatMap { _ => + ctx.runBlocking { + session.close() + } + } + + override def onFrame(ref: Ref, frame: WebSocketFrame)(cb: (Session, String) => Any): Unit = { + val s = if (frame.isText) frame.textData else new String(frame.binaryData.getBytes) + message.append(s) + if (frame.isFinal) { + val x = message.toString + message = new StringBuilder + cb(session, x) + } + } + + override def close(ref: Ref): Future[_] = { + logger.info(s"closing session $session") + ctx.runEventLoop { + ref.value = HasNothing + } flatMap { _ => + ctx.runBlocking { + session.close() + } + } recover { + case _ => () + } flatMap { _ => + ctx.runEventLoop { ws.close() } + } + } + + override def setSession(ref: Ref, session: Session): Nothing = + throw new IllegalStateException("session not expected") + } +} \ No newline at end of file diff --git a/vertx/src/main/scala/com/ibm/csync/vertx/VertxContext.scala b/vertx/src/main/scala/com/ibm/csync/vertx/VertxContext.scala new file mode 100644 index 0000000..2a74bce --- /dev/null +++ b/vertx/src/main/scala/com/ibm/csync/vertx/VertxContext.scala @@ -0,0 +1,60 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.csync.vertx + +import io.vertx.core.Context + +import scala.concurrent.{Future, Promise} +import scala.util.{Failure, Success, Try} + +case class VertxContext(ctx: Context) { + + def runBlocking[A](f: => A): Future[A] = { + if (Context.isOnWorkerThread) { + Future.fromTry(Try(f)) + } else { + val p = Promise[A] + ctx.executeBlocking[A]( + (_: io.vertx.core.Future[A]) => { + Try(f) match { + case Success(a) => p.success(a) + case Failure(e) => p.failure(e) + } + () + }, + null + ) + p.future + } + } + + def runEventLoop[A](f: => A): Future[A] = { + if (Context.isOnEventLoopThread) { + Future.fromTry(Try(f)) + } else { + val p = Promise[A] + ctx.runOnContext { _ => + Try(f) match { + case Success(a) => p.success(a) + case Failure(e) => p.failure(e) + } + } + p.future + } + } + +} diff --git a/vertx/src/test/scala/t1.scala b/vertx/src/test/scala/t1.scala new file mode 100644 index 0000000..4c48868 --- /dev/null +++ b/vertx/src/test/scala/t1.scala @@ -0,0 +1,107 @@ +/* + * Copyright IBM Corporation 2016-2017 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import com.ibm.csync.vertx.{Main, VertxContext} +import io.vertx.core.{AbstractVerticle, Context, Vertx} +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.{Matchers, PropSpec} + +import scala.concurrent.Promise + +class T1 extends PropSpec with Matchers with ScalaFutures { + + /* + def show(vertx: Vertx): Unit = { + val ctx = vertx.getOrCreateContext() + println(s"$ctx") + println(s"worker ${ctx.isWorkerContext}") + println(s"eventloop ${ctx.isEventLoopContext}") + println(s"id ${Thread.currentThread().getId()}") + println(s"workerThread ${Context.isOnWorkerThread}") + println(s"eventLoopThread ${Context.isOnEventLoopThread}") + println(s"vertxThread ${Context.isOnVertxThread}") + println(s"threadName ${Thread.currentThread().getName}") + } + */ + + property("verticle") { + //printf("**** starting test\n") + val vertx = Vertx.vertx() + implicit val ec = Main.currentThreadExecutionContext + val promise = Promise[Int] + + vertx.deployVerticle(new AbstractVerticle { + //printf("***** inside verticle\n") + + override def start() { + //printf("****** hello\n") + val topContext = vertx.getOrCreateContext + val ctx = VertxContext(topContext) + val topThread = Thread.currentThread + + val p1 = Promise[Int] + + topContext.isEventLoopContext should be(true) + topContext.isWorkerContext should be(false) + Context.isOnEventLoopThread should be(true) + Context.isOnWorkerThread should be(false) + Context.isOnVertxThread should be(true) + + val f = ctx.runBlocking { + //printf("************ inside runBlocking\n") + vertx.getOrCreateContext should be(topContext) + Context.isOnEventLoopThread should be(false) + Context.isOnWorkerThread should be(true) + Context.isOnVertxThread should be(true) + Thread.currentThread shouldNot be(topThread) + } + + f.map { _ => + //printf("**** inside map\n") + vertx.getOrCreateContext should be(topContext) + Context.isOnEventLoopThread should be(false) + Context.isOnWorkerThread should be(true) + Context.isOnVertxThread 
should be(true) + Thread.currentThread shouldNot be(topThread) + + topContext.runOnContext { _ => + //printf("*** inside runOnContext") + vertx.getOrCreateContext should be(topContext) + Context.isOnEventLoopThread should be(true) + Context.isOnWorkerThread should be(false) + Context.isOnVertxThread should be(true) + Thread.currentThread should be(topThread) + p1.success(1) + } + } + + p1.future.map { t => + vertx.getOrCreateContext should be(topContext) + Context.isOnEventLoopThread should be(true) + Context.isOnWorkerThread should be(false) + Context.isOnVertxThread should be(true) + Thread.currentThread should be(topThread) + promise.success(t + 1) + } + } + }) + + //Thread.sleep(10000000) + + promise.future.futureValue should be(2) + + } +}