import io
import glob
import json
import os
from pathlib import Path
import re
import subprocess

# Parsed data files, keyed by the name in each file's "files[...]" assignment
files = {}
# Search index entries, one per searchable file
search = []

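# Parse a data .js file: strip the js wrapper so the remainder is valid json,
# then store the parsed data in the global "files" dict under its "files[...]" key.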
def importJs(jsPath):
    with io.open(jsPath, 'r', encoding='utf-8') as jsFile:
        # Strip out the js specific stuff (the "files[...] = " assignment and // comments) to turn it into json
        inText = jsFile.read()
        jsonText = re.sub(r"(files(.*?)= )|(^//.*)", "", inText, flags=re.MULTILINE)
        key = re.search(r"(?<=files\[\").+?(?=\"\])", inText).group()
        file = json.loads(jsonText)
        file["jsPath"] = jsPath
        files[key] = file

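# Append a search entry for a single field, including sub-entries for its
# alt names and for any named cells in its table, to fileEntry["fields"].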
def addField(field, fileEntry, htmlPathStr):
    # Field itself
    fieldEntry = {}
    fieldEntry["name"] = field["name"]
    fieldEntry["path"] = htmlPathStr + "#" + field["name"]

    # Sub-fields in alt names
    if "altNames" in field:
        for altname in field["altNames"]:
            subfieldEntry = {}
            subfieldEntry["name"] = altname
            subfieldEntry["path"] = htmlPathStr + "#" + altname
            fieldEntry.setdefault("subfields", []).append(subfieldEntry)

    # Sub-fields in the table
    if "table" in field:
        for row in field["table"]:
            for col in row:
                if isinstance(col, dict):
                    subfieldEntry = {}
                    subfieldEntry["name"] = col["text"]
                    subfieldEntry["path"] = htmlPathStr + "#" + col["id"]
                    fieldEntry.setdefault("subfields", []).append(subfieldEntry)

    fileEntry["fields"].append(fieldEntry)

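# Build the search index entry for one file: the file itself, each of its fields,
# and the fields of any files it appends.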
def convertFileToSearch(file):
    if file.get("notSearchable"):
        return

    htmlPathStr = "/files/" + file["jsPath"].with_suffix('.html').name

    # First add the file
    fileEntry = {}
    fileEntry["name"] = file["title"]
    fileEntry["path"] = htmlPathStr
    fileEntry["fields"] = []

    # Then every field
    for field in file["fields"]:
        addField(field, fileEntry, htmlPathStr)

    # Then every field from appended files
    if "appendFiles" in file:
        for appendFile in file["appendFiles"]:
            for field in files[appendFile]["fields"]:
                addField(field, fileEntry, htmlPathStr)

    search.append(fileEntry)

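# Write the search index as a .js file that assigns the json to a "const search" variable.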
def exportSearch(searchPath):
    with io.open(searchPath, 'w', encoding='utf-8') as searchFile:
        output = ("// Generated by generate-search.py\n"
                  "// To let users open the HTML files directly without a local server, we need to eliminate any CORS requests like \"fetch\".\n"
                  "// Workaround is to place json into .js files and then load them via html script tags.\n"
                  "// https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS/Errors/CORSRequestNotHttp\n"
                  "const search = " + json.dumps(search, indent=4))
        searchFile.write(output)

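# Entry point: optionally check out search.js in Perforce, import every data .js file,
# build the search index, and write it to <dataPath>/search.js.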
def generateSearch(dataPath, usePerforce):
    searchPath = Path(os.path.join(dataPath, "search.js"))
    if usePerforce:
        # Gather perforce files to checkout or add
        p4EditString = ""
        p4AddString = ""
        if os.path.exists(searchPath):
            p4EditString += str(searchPath)
        else:
            p4AddString += str(searchPath)

        # Check out or add files
        if p4EditString:
            subprocess.run("p4 edit " + p4EditString, check=True, shell=True, cwd=searchPath.parent)
        if p4AddString:
            subprocess.run("p4 add " + p4AddString, check=True, shell=True, cwd=searchPath.parent)

    # Import all the files
    dataFilesPath = os.path.join(dataPath, "files")
    for jsFilepath in glob.glob(os.path.join(dataFilesPath, '*.js')):
        importJs(Path(jsFilepath))

    # Add each file to the search index
    for file in files.values():
        convertFileToSearch(file)

    # Export the search index
    exportSearch(searchPath)

    if usePerforce:
        # Revert unchanged files from perforce
        subprocess.run("p4 revert -a -c default", check=True, shell=True, cwd=searchPath.parent)

    return True