Torrent Suite Software 5.12.1 SDK docs
ipan committed Dec 27, 2019
1 parent 1723277 commit 4983f5a
Showing 93 changed files with 5,899 additions and 6,254 deletions.
6 changes: 6 additions & 0 deletions build_sdk_docs.sh
@@ -0,0 +1,6 @@
#!/bin/bash

repo_name='iontorrent'
image_name='sdk-docs-builder'

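# run the builder image with the current directory mounted at /src to generate the SDK docs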
docker run -v "$(pwd)":/src "$repo_name/$image_name"
9 changes: 9 additions & 0 deletions docker_build.sh
@@ -0,0 +1,9 @@
#!/bin/bash
repo_name="iontorrent"
image_name="sdk-docs-builder"

# build the Docker image
# n.b.: assumes the Dockerfile is in the same directory
if docker build --rm -t $repo_name/$image_name .; then
echo "$image_name built successfully."
fi
11 changes: 11 additions & 0 deletions docker_push.sh
@@ -0,0 +1,11 @@
#!/bin/bash
set -e

repo_name="iontorrent"
image_name="sdk-docs-builder"

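# push every locally built tag of the builder image to the registry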
for name in $(docker images --format '{{.Repository}}:{{.Tag}}'); do
if [[ $name =~ $repo_name/$image_name:.* ]]; then
docker push $name
fi
done
222 changes: 183 additions & 39 deletions generate_api_docs.py
@@ -1,15 +1,23 @@
#!/usr/bin/env python
import requests
import urlparse
import json
import os

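# urlparse was renamed to urllib.parse in Python 3; fall back to the Python 2 module if needed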
try:
from urllib import parse as urlparse
except ImportError:
import urlparse


from settings import TS_URL, TS_USERNAME, TS_PASSWORD


# modified from http://stackoverflow.com/a/12539081/56069
def make_table(grid):
max_cols = [max(out) for out in map(list, zip(*[[len(item) for item in row] for row in grid]))]
max_cols = [
max(out)
for out in map(list, zip(*[[len(item) for item in row] for row in grid]))
]
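# max_cols: widest cell width in each column, used when drawing the grid-table divider lines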
rst = table_div(max_cols, 1)

for i, row in enumerate(grid):
@@ -47,21 +55,95 @@ def lower_case_to_title_case(string):
This function splits concatenated lowercase text like 'bigredtruck' into 'big red truck' using a dictionary of known words.
"""
string = string.replace("_", "").lower()
dictionary = sorted([
"active", "adapter", "analysis", "analysis", "annotation", "appl", "application", "args", "attribute",
"available", "basic", "chef", "cluster", "common", "composite", "content", "cv", "data", "db", "dna", "email",
"event", "experiment", "file", "flow", "genome", "global", "group", "history", "info", "ion", "item", "job",
"kit", "lib", "library", "location", "log", "management", "metrics", "mesh", "monitor", "node", "onetouch", "pgm",
"plan", "planned", "plugin", "prep", "prime", "product", "project", "proton", "qc", "quality", "reference",
"result", "results", "run", "sample", "sequencing", "server", "set", "settings", "suite", "summary", "support",
"support", "template", "tf", "three", "torrent", "type", "upload", "get", "script","info",
], key=len, reverse=True)
dictionary = sorted(
[
"active",
"adapter",
"analysis",
"analysis",
"annotation",
"appl",
"application",
"args",
"attribute",
"available",
"basic",
"chef",
"cluster",
"common",
"composite",
"content",
"cv",
"data",
"db",
"dna",
"email",
"event",
"experiment",
"file",
"flow",
"genome",
"global",
"group",
"history",
"info",
"ion",
"item",
"job",
"kit",
"lib",
"library",
"location",
"log",
"management",
"metrics",
"mesh",
"monitor",
"node",
"onetouch",
"pgm",
"plan",
"planned",
"plugin",
"prep",
"prime",
"product",
"project",
"proton",
"qc",
"quality",
"reference",
"result",
"results",
"run",
"sample",
"sequencing",
"server",
"set",
"settings",
"suite",
"summary",
"support",
"support",
"template",
"tf",
"three",
"torrent",
"type",
"upload",
"get",
"script",
"info",
],
key=len,
reverse=True,
)
found_words = []
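# greedily peel dictionary words off the front of the string, longest match first, at most 10 words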
for _ in range(0, 10):
for word in dictionary:
if string.startswith(word):
found_words.append(word)
string = string[len(word):]
string = string[len(word) :]
break
found_words.append(string)
return " ".join(found_words)
@@ -80,7 +162,8 @@ def lower_case_to_title_case(string):
"ionreporter", # 500
"account", # 401
"prepopulatedplanningsession", # 500
"obsoletereferencegenome" # Obsolete
"obsoletereferencegenome", # Obsolete
"getsoftwareinfo",
]
API_VERSION = "v1"
DEMO_API_BASE_URL = "http://mytorrentserver/rundb/api/v1/"
@@ -98,67 +181,126 @@ def lower_case_to_title_case(string):
resource_doc_paths = []

# Write out docs pages for each API resource
for resource_name, resource_values in main_schema_request.json().items():
for resource_name, resource_values in list(main_schema_request.json().items()):
if resource_name not in EXCLUDE_RESOURCES:

# Get schema for the current resource
print "Using TS API to fetch %s schema..." % resource_name
print("Using TS API to fetch %s schema..." % resource_name)
single_resource_schema_request = requests.get(
urlparse.urljoin(API_BASE_URL, resource_values["schema"]),
params={"format": "json"}, auth=auth)
print single_resource_schema_request.url
params={"format": "json"},
auth=auth,
)
print(single_resource_schema_request.url)
single_resource_schema_request.raise_for_status()

# Make a request to get an example response
single_resource_demo_request = requests.get(
urlparse.urljoin(API_BASE_URL, resource_values["list_endpoint"]) + DEMO_URL_ARGS, auth=auth)
urlparse.urljoin(API_BASE_URL, resource_values["list_endpoint"])
+ DEMO_URL_ARGS,
auth=auth,
)
single_resource_demo_request.raise_for_status()

# We get warnings if the manual files do not exist. Try to create them now if they do not exist
manual_resource_doc_path = os.path.join(MANUAL_API_DOCS_OUTPUT_PATH, resource_name.lower() + ".rst")
manual_resource_doc_path = os.path.join(
MANUAL_API_DOCS_OUTPUT_PATH, resource_name.lower() + ".rst"
)
if not os.path.exists(manual_resource_doc_path):
print "Missing manual rst file for %s resource. Creating one now." % resource_name
print(
"Missing manual rst file for %s resource. Creating one now."
% resource_name
)
with open(manual_resource_doc_path, "w+") as output_file:
output_file.write("")

# Generate an rst for the resource
resource_doc_path = os.path.join(API_DOCS_OUTPUT_PATH, resource_name.lower() + ".rst")
resource_doc_path = os.path.join(
API_DOCS_OUTPUT_PATH, resource_name.lower() + ".rst"
)

with open(resource_doc_path, "w+") as output_file:
json_formatted_demo = json.dumps(single_resource_demo_request.json(), indent=4)
json_formatted_demo = "\n".join(["\t" + json_line for json_line in json_formatted_demo.split("\n")])

json_formatted_schema = json.dumps(single_resource_schema_request.json()["fields"], indent=4)
json_formatted_schema = "\n".join(["\t" + json_line for json_line in json_formatted_schema.split("\n")])

allowed_detail_http_methods = single_resource_schema_request.json()["allowed_detail_http_methods"]
allowed_list_http_methods = single_resource_schema_request.json()["allowed_list_http_methods"]
json_formatted_demo = json.dumps(
single_resource_demo_request.json(), indent=4
)
json_formatted_demo = "\n".join(
["\t" + json_line for json_line in json_formatted_demo.split("\n")]
)

json_formatted_schema = json.dumps(
single_resource_schema_request.json()["fields"], indent=4
)
json_formatted_schema = "\n".join(
["\t" + json_line for json_line in json_formatted_schema.split("\n")]
)
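# each JSON line is prefixed with a tab so the dump can be embedded as an indented block in the .rst output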

allowed_detail_http_methods = single_resource_schema_request.json()[
"allowed_detail_http_methods"
]
allowed_list_http_methods = single_resource_schema_request.json()[
"allowed_list_http_methods"
]

title = lower_case_to_title_case(resource_name).title() + " Resource\n"

output_file.write(".. _api_reference_%s:\n\n" % resource_name.lower())
output_file.write(title)
output_file.write("=" * (len(title)-1) + "\n\n")
output_file.write("=" * (len(title) - 1) + "\n\n")
output_file.write(
"| Resource URL ``%s``\n"
% urlparse.urljoin(DEMO_API_BASE_URL, resource_values["list_endpoint"])
)
output_file.write(
"| Resource URL ``%s``\n" % urlparse.urljoin(DEMO_API_BASE_URL, resource_values["list_endpoint"]))
output_file.write("| Schema URL ``%s``\n" % urlparse.urljoin(DEMO_API_BASE_URL, resource_values["schema"]))
"| Schema URL ``%s``\n"
% urlparse.urljoin(DEMO_API_BASE_URL, resource_values["schema"])
)
output_file.write("| ")

# Write out manual docs portion
output_file.write(
"\n\n" + ".. include:: ../references_manual_extras/" + resource_name.lower() + ".rst" + "\n\n")
"\n\n"
+ ".. include:: ../references_manual_extras/"
+ resource_name.lower()
+ ".rst"
+ "\n\n"
)

# Write out resource schema
cols = ["help_text", "default", "nullable", "readonly", "blank", "unique", "type"]
header_cols = ["help text", "default ", "nullable",
"readonly", "blank", "unique", "type"]
header_cols = ["help text", "default", "nullable", "readonly", "blank", "unique", "type"]
cols = [
"help_text",
"default",
"nullable",
"readonly",
"blank",
"unique",
"type",
]
header_cols = [
"help text",
"default ",
"nullable",
"readonly",
"blank",
"unique",
"type",
]
header_cols = [
"help text",
"default",
"nullable",
"readonly",
"blank",
"unique",
"type",
]
output_file.write("Resource Fields\n")
output_file.write("---------------" + "\n\n")
toprow = ["field"]
toprow.extend(header_cols)
table = [toprow]
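# one table row per schema field, with the field name bolded in the first column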
for field, values in single_resource_schema_request.json()["fields"].iteritems():
for field, values in single_resource_schema_request.json()[
"fields"
].items():
row = ["**" + field + "**"]
for col in cols:
if col == "default" and values[col] == "No default provided.":
@@ -212,4 +354,6 @@ def lower_case_to_title_case(string):
"ordering": []
"""

resource_doc_paths.append(os.path.relpath(resource_doc_path, "source").replace("\\", "/"))
resource_doc_paths.append(
os.path.relpath(resource_doc_path, "source").replace("\\", "/")
)
Diffs for the remaining 89 changed files are not shown here.
