commit 69f1de45a3280155277e7fbadba5cf71ed5995b6 Author: Dunemask Date: Sat Jul 24 15:42:36 2021 -0600 Dunestash Public Backend 0.0.1-a.1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6b74ef2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +# For Deploy +src/node_modules/ +src/desert/ +src/package-lock.json +src/nodemon.json +src/zips/ +src/uploads/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f868b42 --- /dev/null +++ b/LICENSE @@ -0,0 +1,504 @@ +GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. 
We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". 
+ + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. 
+Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. 
However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. 
For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. 
SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random + Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! diff --git a/README.md b/README.md new file mode 100644 index 0000000..850a99b --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# Dunestash + Dunestash is a FOSS application/server for sharing files. diff --git a/Roadmap.md b/Roadmap.md new file mode 100644 index 0000000..d5adbeb --- /dev/null +++ b/Roadmap.md @@ -0,0 +1,15 @@ +# Roadmap + +## These are the plans and requirements for the next phases of development + +### API Server + +* Rewrite authentication methods +* Separate user info from Dunestash info, they're going to be different +* Encrypt All user data, we're not letting anything get compromised. 
+ +### Front end + +* Abstractify the current react project and seperate it from dunestash +* Contextualize the code to make variable access simpler +* Move state handling beyond classic Class methodologies diff --git a/mocks/LoadFLow.xcf b/mocks/LoadFLow.xcf new file mode 100644 index 0000000..24f48e6 Binary files /dev/null and b/mocks/LoadFLow.xcf differ diff --git a/mocks/Reference.xcf b/mocks/Reference.xcf new file mode 100644 index 0000000..fc47103 Binary files /dev/null and b/mocks/Reference.xcf differ diff --git a/mocks/Upload Mockup.xcf b/mocks/Upload Mockup.xcf new file mode 100644 index 0000000..104b025 Binary files /dev/null and b/mocks/Upload Mockup.xcf differ diff --git a/mocks/grouplogic.png b/mocks/grouplogic.png new file mode 100644 index 0000000..a9d3460 Binary files /dev/null and b/mocks/grouplogic.png differ diff --git a/src/api/storage.js b/src/api/storage.js new file mode 100644 index 0000000..a4d6ecb --- /dev/null +++ b/src/api/storage.js @@ -0,0 +1,176 @@ +//Module Imports +const { resolve: resolvePath } = require("path"); +const { + existsSync: fexists, + mkdirSync: mkdir, + readdirSync: readdir, + unlinkSync: fremove, +} = require("fs"); +const AdmZip = require("adm-zip"); +//Local Imports +const Pharaoh = require("../egypt/pharaoh"); +const desertConfig = require("../egypt/desert_config.json"); +const config = require("../config.json"); +//Constants +const fileStorage = new Pharaoh( + resolvePath(desertConfig.desertPath), + desertConfig.schema +); +const zipDir = resolvePath(config.Storage.ZipPath); +function addFile(fileData) { + fileStorage.addEntry(fileData.fileUuid, "files", fileData); + fileStorage.updateEntry(fileData.owner, "uuid", (entry) => { + if (entry == null) this.createUser(ownerUuid); + entry.owned.push(fileData.fileUuid); + return entry; + }); +} +function updateReferenceOnDelete(fileData) { + if (fileData == null) return; + //Update Users Shared List (edit) + fileData.edit.forEach((user) => { + fileStorage.updateEntry(user, "uuid", (entry) => { + if (entry == null) return; + entry.shared.splice(entry.shared.indexOf(fileData.fileUuid), 1); + return entry; + }); + }); + //Update Users Shared List (view) + fileData.view.forEach((user) => { + fileStorage.updateEntry(user, "uuid", (entry) => { + if (entry == null) return; + entry.shared.splice(entry.shared.indexOf(fileData.fileUuid), 1); + return entry; + }); + }); + + fileStorage.updateEntry(fileData.owner, "uuid", (entry) => { + if (entry == null) return; + entry.owned.splice(entry.owned.indexOf(fileData.fileUuid), 1); + return entry; + }); +} +function deleteFile(fileUuid) { + const fileData = fileStorage.deleteEntry(fileUuid, "files"); + + return fileData; +} +function getFile(fileUuid) { + return fileStorage.loadEntry(fileUuid, "files"); +} +function modifyFile(fileUuid, cb) { + fileStorage.updateEntry(fileUuid, "files", cb); +} +function zipFiles(files) {} +function createUser(uuid) { + const userData = { + owned: [], + shared: [], + storage: config.Storage.UserStorageSize * config.Storage.UserStorageUnit, + usedStorage: 0, + }; + fileStorage.updateEntry(uuid, "uuid", (entry) => { + if (entry != null) return; + return userData; + }); + return userData; +} +function updateUser(ownerUuid, cb) { + fileStorage.updateEntry(ownerUuid, "uuid", cb); +} +function getOwnedFileList(ownerUuid) { + const owner = fileStorage.loadEntry(ownerUuid, "uuid"); + if (owner == null) return; + return owner.owned; +} +function getSharedFileList(ownerUuid) { + const owner = fileStorage.loadEntry(ownerUuid, "uuid"); + if 
(owner == null) return; + return owner.shared; +} +function setMaxStorage(ownerUuid, newMax) { + fileStorage.updateEntry(ownerUuid, "uuid", (entry) => { + if (entry == null) this.createUser(ownerUuid); + entry.storage = newMax; + return entry; + }); +} +function modifyUsedStorage(ownerUuid, cb) { + fileStorage.updateEntry(ownerUuid, "uuid", (entry) => { + if (entry == null) entry = this.createUser(ownerUuid); + const maxStorage = entry.storage; + const newUsed = + cb(entry.storage, entry.usedStorage ?? 0) ?? entry.usedStorage; + if (newUsed > maxStorage) + throw new Error("New Size Exceeds User Max Storage!"); + entry.usedStorage = newUsed; + return entry; + }); +} +async function buildZip(ownerUuid, paths, zipUuid) { + //Create directory and build zip with adm zip + const zipPath = resolvePath(zipDir, `${zipUuid}.zip`); + var zip = { + owner: ownerUuid, + path: zipPath, + building: true, + }; + fileStorage.addEntry(zipUuid, "zips", zip); + createZip(paths, zipPath).then(() => { + fileStorage.updateEntry(zipUuid, "zips", (entry) => { + if (entry == null) return; + entry.exp = Date.now() + config.Storage.ZipClickExpire; + delete entry.building; + return entry; + }); + }); +} +async function createZip(paths, zipPath) { + if (!fexists(zipDir)) mkdir(zipDir); + let zipFile = new AdmZip(); + paths.forEach((filePath) => { + zipFile.addLocalFile(filePath); + }); + setTimeout(() => zipFile.writeZip(zipPath), 0); +} +function getZipPath(ownerUuid, zipUuid) { + var zipPath, building; + fileStorage.updateEntry(zipUuid, "zips", (entry) => { + if (entry == null || (building = entry.building)) return; + entry.exp = Date.now() + config.Storage.ZipDownloadExpire; + zipPath = entry.path; + return entry; + }); + if (building === true) return building; + if (zipPath == null || !fexists(zipPath)) return; + return zipPath; +} +function cleanZips() { + var zipUuid; + const time = Date.now(); + readdir(zipDir).forEach((file) => { + zipUuid = file.substring(0, file.indexOf(".zip")); + fileStorage.updateEntry(zipUuid, "zips", (entry, deleteEntry) => { + if (entry == null) return; + if (entry.building === true) return; + if (entry.exp <= time) { + deleteEntry(); + fremove(entry.path); + } + }); + }); +} +module.exports = { + addFile, + updateReferenceOnDelete, + deleteFile, + getFile, + modifyFile, + createUser, + updateUser, + getOwnedFileList, + getSharedFileList, + setMaxStorage, + modifyUsedStorage, + cleanZips, +}; diff --git a/src/api/upload.js b/src/api/upload.js new file mode 100644 index 0000000..fdc7ad4 --- /dev/null +++ b/src/api/upload.js @@ -0,0 +1,41 @@ +//Module Imports +const fs = require("fs"); +const { resolve: resolvePath } = require("path"); +const multer = require("multer"); +//Local Imports +const config = require("../config.json"); +//Multer Configs +const userUploadStorage = multer.diskStorage({ + destination: (req, file, cb) => { + cb(null, userUploadDestination(req)); + }, + filename: (req, file, cb) => { + const n = file.originalname.replaceAll(" ", "_"); + const fileName = `${Date.now()}-${n}`; + req.on("aborted", () => { + cancelUpload(resolvePath(userUploadDestination(req), fileName)); + }); + cb(null, fileName); + }, +}); +const userUpload = multer({ + storage: userUploadStorage, +}).single("user-selected-file"); + +//Helper Methods +function userUploadDestination(req) { + if (!fs.existsSync(resolvePath(config.Storage.UploadPath))) + fs.mkdirSync(resolvePath(config.Storage.UploadPath)); + const destination = resolvePath(config.Storage.UploadPath, req.session.uuid); + if 
(!fs.existsSync(destination)) fs.mkdirSync(destination);
+  return destination;
+}
+
+function cancelUpload(path) {
+  if (path != null && fs.existsSync(path)) fs.unlinkSync(path);
+}
+
+module.exports = {
+  userUpload,
+  cancelUpload,
+};
diff --git a/src/api/user.js b/src/api/user.js
new file mode 100644
index 0000000..658bc06
--- /dev/null
+++ b/src/api/user.js
@@ -0,0 +1,239 @@
+//Module Imports
+const { resolve: resolvePath } = require("path");
+const { existsSync: fexists, unlinkSync: fremove } = require("fs");
+const uuidGen = require("uuid-with-v6").v6;
+//Local Imports
+const storage = require("./storage");
+const config = require("../config.json");
+/**
+ * Generates a new uuid.v6() and reverses it so the timestamp is at the end.
+ * This should provide an additional layer of "randomness" and decrease the chances
+ * of duplicate uuids being generated.
+ * The reversal also forces the custom DB to expand faster at first rather than
+ * later when there are lots of entries.
+ */
+function generateUuid() {
+  return [...uuidGen()].reverse().join("");
+}
+/**
+ * Create a user with a uuid (should use Dunestorm API to login)
+ */
+function createUser(uuid) {
+  storage.createUser(uuid);
+}
+/**
+ * Creates a file entry from the metadata of an uploaded file
+ */
+function uploadFile(uuid, fileData) {
+  const fileUuid = generateUuid();
+  var sizeAccepted;
+  storage.modifyUsedStorage(uuid, (max, used) => {
+    const oldUsed = used;
+    used += fileData.size;
+    if ((sizeAccepted = used <= max)) return used;
+  });
+  if (!sizeAccepted) return;
+  const file = {
+    fileUuid,
+    path: fileData.path,
+    owner: uuid,
+    name: fileData.originalname,
+    date: fileData.filename.substring(0, fileData.filename.indexOf("-")),
+    size: fileData.size,
+    public: false,
+    edit: [],
+    view: [],
+  };
+  storage.addFile(file);
+  return file;
+}
+/**
+ TODO: ASYNCIFY?
+ Removes user references to files that are being deleted
+ */
+function removeEntryLinks(files) {
+  for (var o in files.owner) {
+    storage.updateUser(o, (entry) => {
+      if (entry == null) return;
+      files.owner[o].forEach((file) => {
+        entry.owned.splice(entry.owned.indexOf(file.fileUuid), 1);
+        entry.usedStorage -= file.size;
+      });
+      return entry;
+    });
+  }
+  for (var user in files.edit) {
+    storage.updateUser(user, (entry) => {
+      if (entry == null) return;
+      files.edit[user].forEach((file) => {
+        entry.shared.splice(entry.shared.indexOf(file), 1);
+      });
+      return entry;
+    });
+  }
+  for (var user in files.view) {
+    storage.updateUser(user, (entry) => {
+      if (entry == null) return;
+      files.view[user].forEach((file) => {
+        entry.shared.splice(entry.shared.indexOf(file), 1);
+      });
+      return entry;
+    });
+  }
+}
+/**
+ * Deletes files.
+ * Requires Uuid to guarantee permission to delete a file
+ * Sorts files by user before deleting to speed up reference updates
+ */
+function deleteFiles(uuid, targetFiles) {
+  var deleteFails = [];
+  //Group files by user so each user's references can be updated in one pass
+  var filesSortedByUser = {
+    owner: {},
+    edit: {},
+    view: {},
+  };
+  targetFiles.forEach((targetFile) => {
+    storage.modifyFile(targetFile, (entry, deleteEntry) => {
+      if (!authorizedToEditFile(uuid, entry)) return;
+      //Add owner and file size to the update object
+      if (filesSortedByUser.owner[entry.owner] == null)
+        filesSortedByUser.owner[entry.owner] = [];
+      filesSortedByUser.owner[entry.owner].push({
+        fileUuid: targetFile,
+        size: entry.size,
+      });
+      //Add edit members to the edit update
+      for (var id of entry.edit) {
+        if (filesSortedByUser.edit[id] == null)
+          filesSortedByUser.edit[id] = [];
+        filesSortedByUser.edit[id].push(targetFile);
+      }
+      //Add view members to the view update
+      for (var id of entry.view) {
+        if (filesSortedByUser.view[id] == null)
+          filesSortedByUser.view[id] = [];
+        filesSortedByUser.view[id].push(targetFile);
+      }
+      //Wrap in try/catch so a failed physical delete is recorded instead of thrown
+      try {
+        deleteEntry(entry);
+        fremove(entry.path);
+      } catch (e) {
+        console.error("Error Deleting File", entry.name, "\nPath:", entry.path);
+        deleteFails.push(targetFile);
+      }
+    });
+  });
+  //Updates user entries using the filesSortedByUser
+  removeEntryLinks(filesSortedByUser);
+  //Return the failed deletions (or false if everything was removed)
+  return deleteFails.length > 0 && deleteFails;
+}
+/**
+ * Checks that a user is authorized to view the file and then
+ * returns the physical filePath of the desired file (uses the entry to find the path)
+ */
+function getFilePath(uuid, targetFile) {
+  const fileData = storage.getFile(targetFile);
+  if (!authorizedToViewFile(uuid, fileData)) return;
+  if (fexists(fileData.path)) return fileData.path;
+}
+/**
+ * Returns the file entries for all files that the user owns
+ */
+function getOwnedFiles(uuid) {
+  const fileList = storage.getOwnedFileList(uuid);
+  if (fileList == null) return [];
+  var files = new Array(fileList.length);
+  fileList.forEach((file, i) => {
+    files[i] = storage.getFile(file);
+  });
+  return files;
+}
+/**
+ * TODO: Implement Zips
+ * Creates a zip file and returns the zipUuid to the client.
+ */
+async function requestZip(uuid, targetFiles, cb) {
+  var zipPath, fileData;
+  var filePaths = [];
+  for (var file of targetFiles) {
+    fileData = storage.getFile(file);
+    if (!authorizedToViewFile(uuid, fileData)) return;
+    if (!fexists(fileData.path)) return;
+    filePaths.push(fileData.path);
+  }
+  const zipUuid = generateUuid();
+  cb(zipUuid);
+  setTimeout(() => storage.buildZip(uuid, filePaths, zipUuid), 0);
+  return zipUuid;
+}
+/**
+ * TODO: Implement Zips
+ * Returns the zip path from a zipUuid
+ */
+function getZipPath(uuid, targetZip) {
+  return storage.getZipPath(uuid, targetZip);
+}
+/**
+ * TODO: Implement Advanced Sharing
+ * Shares a file with various people and various permissions
+ */
+function shareFile(uuid, targetFile) {
+  console.log(uuid, "requesting to share file");
+  console.log(targetFile);
+}
+/**
+ * TODO: Implement Advanced Sharing
+ * Returns all files shared with a user
+ */
+function getSharedFiles(uuid) {
+  return storage.getSharedFileList(uuid);
+}
+/**
+ * Checks if a user is authorized to edit a particular file
+ */
+function authorizedToEditFile(client, fileData) {
+  if (fileData == null) return false;
+  if (fileData.owner === client) return true;
+  return fileData.edit.includes(client);
+}
+/**
+ * Checks if a user is authorized to view a particular file
+ */
+function authorizedToViewFile(client, fileData) {
+  if (fileData == null) return false;
+  if (fileData.public === true) return true;
+  if (fileData.owner === client) return true;
+  return fileData.edit.includes(client) || fileData.view.includes(client);
+}
+/**
+ * Checks if the user is the owner and then toggles each file's public flag
+ */
+function publicfyFiles(uuid, files) {
+  var publicfyFails = [];
+  files.forEach((file, i) => {
+    storage.modifyFile(file, (entry) => {
+      if (entry == null || entry.owner !== uuid) {
+        publicfyFails.push(file);
+        return;
+      }
+      entry.public = !entry.public;
+      return entry;
+    });
+  });
+  //Return the files that could not be toggled (or false if all succeeded)
+  return publicfyFails.length > 0 && publicfyFails;
+}
+module.exports = {
+  createUser,
+  uploadFile,
+  deleteFiles,
+  getFilePath,
+  getOwnedFiles,
+  publicfyFiles,
+  shareFile,
+  getSharedFiles,
+  requestZip,
+  getZipPath,
+};
diff --git a/src/config.json b/src/config.json
new file mode 100644
index 0000000..383a53e
--- /dev/null
+++ b/src/config.json
@@ -0,0 +1,20 @@
+{
+  "Storage": {
+    "DesertPath": "desert/",
+    "UploadPath": "uploads/",
+    "ZipPath": "zips/",
+    "UserStorageSize": 2048,
+    "UserStorageUnit": 1048576,
+    "UploadMaxSize": "",
+    "ZipClickExpire": 60000,
+    "ZipDownloadExpire": 720000
+  },
+  "Server": {
+    "Port": 4000,
+    "Debug": false,
+    "BodyLimit": "5mb",
+    "ZipRemovalInterval": 1800000,
+    "jwtHeader": "authorization",
+    "authServer": "http://localhost:4001/api/user/data"
+  }
+}
diff --git a/src/egypt/desert_config.json b/src/egypt/desert_config.json
new file mode 100644
index 0000000..3574469
--- /dev/null
+++ b/src/egypt/desert_config.json
@@ -0,0 +1,33 @@
+{
+  "desertPath": "desert/",
+  "schema": {
+    "files": {
+      "tokenList": "abcdefghijklmnopqrstuvwxyz0123456789",
+      "tokenSplitters": "0123456789abcdef",
+      "entrySplit": 500,
+      "attr": [
+        "fileUuid",
+        "path",
+        "owner",
+        "name",
+        "date",
+        "size",
+        "public",
+        "edit",
+        "view"
+      ]
+    },
+    "zips": {
+      "tokenList": "abcdefghijklmnopqrstuvwxyz0123456789",
+      "tokenSplitters": "0123456789abcdef",
+      "entrySplit": 500,
+      "attr": ["owner", "path", "exp"]
+    },
+    "uuid": {
+      "tokenList": "abcdefghijklmnopqrstuvwxyz0123456789",
+
"tokenSplitters": "0123456789abcdef", + "entrySplit": 500, + "attr": ["owned", "shared", "storage", "usedStorage"] + } + } +} diff --git a/src/egypt/pharaoh.js b/src/egypt/pharaoh.js new file mode 100644 index 0000000..d865c52 --- /dev/null +++ b/src/egypt/pharaoh.js @@ -0,0 +1,151 @@ +//Module Imports +const { + existsSync: fexists, + readFileSync: fread, + writeFileSync: fwrite, + rmSync: fremove, +} = require("fs"); +const { join: joinPath, basename } = require("path"); +const _ = require("lodash"); +//Local Imports +const Pyramid = require("./pyramid"); +//Constants +//Misc Functions +function isObject(obj) { + return typeof obj === "object" && !(obj instanceof Array); +} +//Main Class +module.exports = class Pharoah { + constructor(desertPath, schema) { + this.desertPath = desertPath; + var mainStorageName = Object.keys(schema)[0]; + var mainStorage = schema[mainStorageName]; + this.pyramids = {}; + this.pyramidSchemas = this.#buildSchema(schema); + } + + addEntry(query, pyramidName, entry) { + this.pyramids[pyramidName].pyramid.addStorageEntry(query, entry); + if (!(this.pyramids[pyramidName].refs instanceof Array)) { + return; + } + for (var r of this.pyramids[pyramidName].refs) { + if (entry[r] != null) + this.pyramids[r].pyramid.addStorageEntry(entry[r], query); + } + } + + deleteEntry(query, pyramidName) { + const entry = this.pyramids[pyramidName].pyramid.removeStorageEntry(query); + if (entry == null) return; + if (!(this.pyramids[pyramidName].refs instanceof Array)) return entry; + for (var r of this.pyramids[pyramidName].refs) { + if (entry[r] != null) + this.pyramids[r].pyramid.removeStorageEntry(entry[r]); + } + return entry; + } + + updateEntry(query, pyramidName, cb) { + if (cb === null || typeof cb !== "function") + throw new Error("Error: Callback cannot be " + cb); + const mainPyramid = this.pyramids[pyramidName]; + mainPyramid.pyramid.loadStorageEntry( + query, + (entry, update, deleteEntry) => { + const oldWasObject = isObject(entry); + const oldEntry = _.cloneDeep(entry); + var entryDeleted; + entry = cb(entry, () => { + if (oldWasObject) + this.#updateDeleteRef(deleteEntry, oldEntry, mainPyramid.refs); + else deleteEntry(); + entryDeleted = true; + }); + if (entry == null || entryDeleted || _.isEqual(oldEntry, entry)) return; + update(entry); + //If there are no ref objects, just return. 
+ if (!(mainPyramid.refs instanceof Array)) return; + const newIsObject = isObject(entry); + //If both are objects, compare a difference in the refs, + if (oldWasObject && newIsObject) { + for (var r of mainPyramid.refs) { + if (!_.isEqual(oldEntry[r], entry[r])) + this.#updateRef(this.pyramids[r], oldEntry[r], entry[r], query); + } + //If only the old is an object, remove the old references + } else if (oldWasObject && !newIsObject) { + for (var r of mainPyramid.refs) { + if (entry[r] != null) + this.pyramids[r].pyramid.removeStorageEntry(oldEntry[r]); + } + //If only the new is an object, only add the new references + } else if (!oldWasObject && newIsObject) { + for (var r of mainPyramid.refs) { + if (entry[r] != null) + this.pyramids[r].pyramid.addStorageEntry(entry[r], query); + } + } + } + ); + } + + loadEntry(query, pyramidName) { + const data = this.pyramids[pyramidName].pyramid.loadStorageEntry(query); + return data; + } + + loadEntryByReference(query, refPyramid, targetPyramid) { + const ref = this.loadEntry(query, refPyramid); + if (ref == null) return; + if (typeof ref !== "string") throw new Error("Pointer Must Be a String!"); + return this.loadEntry(ref, targetPyramid); + } + + updateEntryByReference(query, refPyramid, targetPyramid, cb) { + const ref = this.loadEntry(query, refPyramid); + if (ref == null) return; + if (typeof ref !== "string") throw new Error("Pointer Must Be a String!"); + this.updateEntry(ref, targetPyramid, cb); + } + + #buildSchema(schema) { + var pyramidPath, schem; + for (var s in schema) { + pyramidPath = joinPath(this.desertPath, s); + if (fexists(pyramidPath)) schem = { pyramid: Pyramid.load(pyramidPath) }; + else + schem = { + pyramid: new Pyramid( + pyramidPath, + schema[s].tokenList, + schema[s].tokenSplitters, + schema[s].entrySplit + ), + }; + if (schema[s].attr != null) schem.attr = schema[s].attr; + if (schema[s].refs != null) schem.refs = schema[s].refs; + this.pyramids[s] = schem; + } + } + + #updateRef(pyramidSchema, oldRef, newRef, pointer) { + if (oldRef == null && pointer != null) + pyramidSchema.pyramid.addStorageEntry(newRef, pointer); + else if (oldRef != null && newRef != null) + pyramidSchema.pyramid.moveStorageEntry(oldRef, newRef); + else if (oldRef != null && newRef == null) + pyramidSchema.pyramid.removeStorageEntry(oldRef); + } + + #updateDeleteRef(pyramidDelete, entry, refs) { + //Calls Delete within the loadStorageEntry function + pyramidDelete(); + if (refs != null && refs instanceof Array) + for (var r of refs) { + if (entry[r] != null) + this.pyramids[r].pyramid.removeStorageEntry(entry[r]); + } + return true; + } +}; diff --git a/src/egypt/pyramid.js b/src/egypt/pyramid.js new file mode 100644 index 0000000..17ef991 --- /dev/null +++ b/src/egypt/pyramid.js @@ -0,0 +1,365 @@ +//Module Imports +const { + mkdirSync: mkdir, + existsSync: fexists, + readFileSync: fread, + writeFileSync: fwrite, + rmSync: fremove, +} = require("fs"); +const { join: joinPath, basename } = require("path"); +//Constants +const pyramidEncoding = "utf8"; +const pointerExtension = ".json"; +const storageExtension = ".json"; +module.exports = class Pyramid { + static load(filePath) { + const pointerFile = filePath + pointerExtension; + if (!fexists(pointerFile)) + throw new Error(`Pyramid Not Found At ${pointerFile}`); + const pointerData = JSON.parse(fread(pointerFile, pyramidEncoding)); + return new Pyramid( + filePath, + pointerData.tokenList, + pointerData.tokenSplitters, + pointerData.entrySplit, + false + ); + } + /** + * Constructor for Pyramid 
+ * Note: entrySplit is set to 250 automatically + * This was the best balance of ram usage and time + * Sample size was 50k items time: 63s average + */ + constructor( + filePath, + tokenList, + tokenSplitters, + entrySplit = 250, + create = true + ) { + if (filePath == null) throw new Error(`Filepath cannot be: ${filePath}`); + if (tokenSplitters == null) + throw new Error(`tokenSplitters cannot be: ${tokenSplitters}`); + this.name = basename(filePath); + this.pointerFile = filePath + pointerExtension; + this.storagePath = filePath; + this.tokenSplitters = tokenSplitters; + this.entrySplit = entrySplit; + this.tokenList = tokenList; + //Create Physical Directories + if (create) { + this.#createPyramid(); + } + } + /** + * Writes Entry to a queried storage + */ + addStorageEntry(query, entry) { + var pointer = this.#loadPointer(); + var storageToken = this.#queryStorageToken(query, pointer); + var storageCount = this.#getStorageCount(storageToken, pointer); + if (storageCount + 1 >= this.entrySplit) { + pointer = this.#splitStorage(storageToken); + storageToken = this.#queryStorageToken(query, pointer); + } + var storageData = this.#loadStorage(storageToken); + if (storageData[query] == null) + this.#modifyStorageCount(storageToken, 1, pointer); + storageData[query] = entry; + this.#writeStorage(storageToken, storageData); + } + /** + * Removes Entry from a queried storage + */ + removeStorageEntry(query) { + const pointer = this.#loadPointer(); + const storageToken = this.#queryStorageToken(query, pointer); + var storageData = this.#loadStorage(storageToken); + const entry = storageData[query]; + delete storageData[query]; + if (entry != null) this.#modifyStorageCount(storageToken, -1, pointer); + this.#writeStorage(storageToken, storageData); + return entry; + } + + /** + * Load User Entry from a query + */ + loadStorageEntry(query, cb) { + const pointer = this.#loadPointer(); + const storageToken = this.#queryStorageToken(query, pointer); + var storageData = this.#loadStorage(storageToken); + const entry = storageData[query]; + if (cb !== null && typeof cb === "function") { + cb( + entry, + //Update Function + (modifiedEntry) => { + if (storageData[query] != null) + this.#writeEntryChanges( + query, + storageToken, + storageData, + modifiedEntry + ); + else this.addStorageEntry(query, modifiedEntry); + }, + //Delete Function + () => { + delete storageData[query]; + if (entry != null) + this.#modifyStorageCount(storageToken, -1, pointer); + this.#writeStorage(storageToken, storageData); + } + ); + } else return entry; + } + /** + * Modify entry provided by a query + */ + modifyStorageEntry(query, entry) { + const pointer = this.#loadPointer(); + const storageToken = this.#queryStorageToken(query, pointer); + var storageData = this.#loadStorage(storageToken); + if (storageData[entry] == null) + throw new Error(`Query ${query} not found!`); + storageData[query] = entry; + this.#writeStorage(storageToken, storageData); + } + + /** + * Moves one queries entry to another queries entry. 
+ */ + moveStorageEntry(oldQuery, newQuery) { + const pointer = this.#loadPointer(); + const oldStorageToken = this.#queryStorageToken(oldQuery); + const newStorageToken = this.#queryStorageToken(newQuery); + if (oldStorageToken === newStorageToken) return; + //Remove old data + var storageData = this.#loadStorage(oldStorageToken); + if (storageData == null) return; + const entry = storageData[oldQuery]; + if (entry == null) return; + delete storageData[oldQuery]; + this.#modifyStorageCount(oldStorageToken, -1, pointer); + this.#writeStorage(oldStorageToken, storageData); + //Add new data + storageData = this.#loadStorage(newStorageToken); + if (storageData[newQuery] == null) + this.#modifyStorageCount(newStorageToken, 1, pointer); + storageData[newQuery] = entry; + this.#writeStorage(newStorageToken, storageData); + } + /** + * Writes storageData to specific storage + */ + #writeEntryChanges(query, storageToken, storageData, modifiedEntry) { + storageData[query] = modifiedEntry; + this.#writeStorage(storageToken, storageData); + } + /** + * Returns Count of entries in storage (indexed by the pointer) + */ + #getStorageCount(storageToken, pointer = this.#loadPointer()) { + var parent = pointer.entries; + for (var st of storageToken) { + parent = parent[st]; + } + return parent; + } + /** + * Sets Count of entries in storage (stored in pointerFile) + */ + #setStorageCount(storageToken, value, pointer = this.#loadPointer()) { + var parentStack = []; + var parent = pointer.entries; + //Create "stack" of the parent references + for (var st of storageToken) { + parentStack.push(parent); + parent = parent[st]; + } + //Add Value to the stack + parentStack.push(value); + //Add the modified child to the previous parent, rinse, repeat, victory! + for (var p = parentStack.length - 1; p >= 0; p--) { + if (parentStack[p - 1] == null) break; + parentStack[p - 1][storageToken[p - 1]] = parentStack[p]; + } + pointer.entries = parentStack[0]; + this.#writePointer(pointer); + return pointer; + } + /** + * Modifies the storageCount by the given value + */ + #modifyStorageCount(storageToken, value, pointer = this.#loadPointer()) { + var parentStack = []; + var parent = pointer.entries; + //Create "stack" of the parent references + for (var st of storageToken) { + parentStack.push(parent); + parent = parent[st]; + } + //Mdofiy original value and add it to the stack + parentStack.push(parent + value); + //Add the modified child to the previous parent, rinse, repeat, victory! 
+ for (var p = parentStack.length - 1; p >= 0; p--) { + if (parentStack[p - 1] == null) break; + parentStack[p - 1][storageToken[p - 1]] = parentStack[p]; + } + pointer.entries = parentStack[0]; + this.#writePointer(pointer); + return pointer; + } + /** + * Returns storageToken that would contain query + */ + #queryStorageToken(query, pointer = this.#loadPointer()) { + var parent = pointer.entries; + var tokenLocation; + var tokenStack = ""; + for (var l = 0; l < query.length; l++) { + //Get the category to 'sink' current letter into + if (this.tokenSplitters.includes(query[l])) tokenLocation = query[l]; + else tokenLocation = this.#queryStorageTokenSplitter(query[l]); + tokenStack += tokenLocation; + //If tokenStack is exactly their token ex:amamam + //Immediately append the first part of the letter stack + if (tokenStack === query) { + var extraTokens = 0; + while ((parent = parent[this.tokenSplitters[0]])) extraTokens++; + return tokenStack + this.tokenSplitters[0].repeat(extraTokens); + } + //If Parent has children, keep going + if ( + typeof parent[tokenLocation] === "object" && + !(parent[tokenLocation] instanceof Array) + ) { + parent = parent[tokenLocation]; + } else break; + } + return tokenStack; + } + /** + * Returns token that should envelop the queried token + * EX: tokenSplitters = [a,m] Query = c returns a + */ + #queryStorageTokenSplitter(query) { + const queryIndex = this.tokenList.indexOf(query); + if (queryIndex === -1) return this.tokenSplitters[0]; + for (var s in this.tokenSplitters) { + if (this.tokenList.indexOf(this.tokenSplitters[s]) >= queryIndex) + return this.tokenSplitters[Math.max(0, s - 1)]; + } + return this.tokenSplitters[s]; + } + /** + * Creates Pointer File and intializes all storages + */ + #createPyramid() { + if (fexists(this.pointerFile)) { + console.warn( + "Warning: DatabasePointer already exists!", + this.pointerFile, + "Skipping.." 
+ ); + return; + } + var databaseSchema = { + entrySplit: this.entrySplit, + tokenSplitters: this.tokenSplitters, + tokenList: this.tokenList, + entries: {}, + }; + if (!fexists(this.storagePath)) mkdir(this.storagePath); + //Add All + for (var s of this.tokenSplitters) { + databaseSchema.entries[s] = 0; + this.#writeStorage(s, {}); + } + this.#writePointer(databaseSchema); + } + /** + * Writes data to Pyramid Storage based on storageToken + */ + #writeStorage(storageToken, data) { + const storageFile = this.#storageFile(storageToken); + fwrite(storageFile, JSON.stringify(data)); + } + /** + * Write data pointerFile + */ + #writePointer(data) { + fwrite(this.pointerFile, JSON.stringify(data)); + } + /** + * Load pointerFile Object + */ + #loadPointer() { + return JSON.parse(fread(this.pointerFile, pyramidEncoding)); + } + /** + * Loads storage object given a storageToken + */ + #loadStorage(storageToken) { + return JSON.parse(fread(this.#storageFile(storageToken), pyramidEncoding)); + } + /** + * Deletes a particular storage + */ + #removeStorage(storageToken) { + const storageFile = this.#storageFile(storageToken); + if (fexists(storageFile)) fremove(storageFile); + } + /** + * Joins the storageToken path and pyramidpath + */ + #storageFile(token) { + return joinPath(this.storagePath, token + storageExtension); + } + /** + * Splits the specified storage (located with storageToken) + * into the different files specified by this.tokenSplitters + */ + #splitStorage(oldToken, pointer = this.#loadPointer()) { + const oldStorage = this.#loadStorage(oldToken); + const oldKeys = Object.keys(oldStorage); + var newStorages = {}; + var newStorageCount = {}; + //Set Default Count and create empty objects for new storage + for (var s of this.tokenSplitters) { + newStorageCount[s] = 0; + newStorages[oldToken + s] = {}; + } + /*Calculate next token to add to the storagePath for each entry + *If the token is already <= the length of the new token + *add the standard first token. 
+ *Ex: entry "mouse" in storage "mouse" -> "mousea"
+ *Else calculate the splitter for the next letter
+ *
+ * Then add to the new storage count and assign the storage
+ * In theory the total RAM usage should never be more than double
+ * the original size of the storage
+ */
+ var migratedTokenStack, additionalToken;
+ for (var k of oldKeys) {
+ migratedTokenStack = oldToken;
+ if (k.length <= oldToken.length + 1)
+ additionalToken = this.tokenSplitters[0];
+ else
+ additionalToken = this.#queryStorageTokenSplitter(k[oldToken.length]);
+ migratedTokenStack += additionalToken;
+ newStorages[migratedTokenStack][k] = oldStorage[k];
+ newStorageCount[additionalToken]++;
+ }
+ for (var st in newStorages) {
+ this.#writeStorage(st, newStorages[st]);
+ }
+ this.#removeStorage(oldToken);
+ return this.#setStorageCount(oldToken, newStorageCount);
+ }
+};
diff --git a/src/package.json b/src/package.json
new file mode 100644
index 0000000..425517e
--- /dev/null
+++ b/src/package.json
@@ -0,0 +1,33 @@
+{
+ "name": "duneserver",
+ "version": "1.0.0",
+ "description": "Dunemask Web server",
+ "main": "server.js",
+ "scripts": {
+ "start": "node server.js --no-warnings",
+ "test-server": "nodemon src/server.js"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/Dunemask/npmserver.git"
+ },
+ "author": "",
+ "license": "ISC",
+ "bugs": {
+ "url": "https://github.com/Dunemask/npmserver/issues"
+ },
+ "homepage": "https://github.com/Dunemask/npmserver#readme",
+ "dependencies": {
+ "adm-zip": "^0.5.5",
+ "axios": "^0.21.1",
+ "bcrypt": "^5.0.1",
+ "body-parser": "^1.19.0",
+ "express": "^4.17.1",
+ "express-session": "^1.17.1",
+ "lodash": "^4.17.21",
+ "multer": "^1.4.2",
+ "path": "^0.12.7",
+ "rimraf": "^3.0.2",
+ "uuid-with-v6": "^1.1.2"
+ }
+}
diff --git a/src/routes/stash.js b/src/routes/stash.js
new file mode 100644
index 0000000..526f968
--- /dev/null
+++ b/src/routes/stash.js
@@ -0,0 +1,106 @@
+const express = require("express");
+const axios = require("axios");
+//Local Imports & Configs
+const asUser = require("../api/user");
+const upload = require("../api/upload");
+const config = require("../config.json");
+//Establish path and create router
+/** Absolute Router Path /api/stash */
+const router = express.Router();
+
+const authMiddleware = (req, res, next) => {
+ if (req.session.uuid != null) return next();
+ var headers = {};
+ var bearerToken = req.get(config.Server.jwtHeader);
+ if (bearerToken == null) return res.sendStatus(401);
+ headers[config.Server.jwtHeader] = bearerToken;
+ axios
+ .get(config.Server.authServer, { headers })
+ .then((authRes) => {
+ if (authRes.status !== 200) return res.sendStatus(401);
+ if (authRes.data != null) {
+ req.session.uuid = authRes.data.uuid;
+ next();
+ } else res.sendStatus(401);
+ })
+ .catch((e) => {
+ if (e.response != null) res.sendStatus(e.response.status);
+ else res.sendStatus(401);
+ });
+};
+
+router.get("/files", authMiddleware, (req, res) => {
+ const files = asUser.getOwnedFiles(req.session.uuid);
+ res.status(200).json(files);
+});
+
+router.post("/upload", authMiddleware, (req, res) => {
+ upload.userUpload(req, res, (err) => {
+ if (err || req.file == null) return res.sendStatus(500);
+ const fileData = asUser.uploadFile(req.session.uuid, req.file);
+ if (fileData == null) {
+ upload.cancelUpload(req.file.path);
+ return res.sendStatus(500);
+ }
+ res.json(fileData);
+ });
+});
+
+router.post("/delete", authMiddleware, (req, res) => {
+ if (!req.body || !(req.body instanceof Array)) {
+ return res.sendStatus(400);
+ }
+ const failed = asUser.deleteFiles(req.session.uuid, req.body);
+ if (!failed) return res.sendStatus(200);
+ res.status(500).json(failed);
+});
+
+router.get("/download", async (req, res) => {
+ if (!req.query || (!req.query.target && !req.query.zipTarget))
+ return res.sendStatus(404);
+ if (req.query.target) {
+ const filePath = asUser.getFilePath(req.session.uuid, req.query.target);
+ if (!filePath) return res.sendStatus(404);
+ return res.download(filePath);
+ }
+ //ZIPS ARE NOT SUPPORTED YET, so the zip branch below is unreachable for now
+ return res.sendStatus(404);
+ if (req.session.uuid == null) return res.sendStatus(401);
+ if (req.query.zipTarget) {
+ const zipPath = asUser.getZip(req.session.uuid, req.query.zipTarget);
+ if (zipPath === true) return res.sendStatus(503);
+ if (zipPath == null) return res.sendStatus(404);
+ res.download(zipPath);
+ }
+});
+
+//TODO
+router.post("/download", authMiddleware, (req, res) => {
+ //ZIPS ARE NOT SUPPORTED YET, so the zip request below is unreachable for now
+ return res.sendStatus(404);
+ if (!req.body || !(req.body instanceof Array)) {
+ return res.sendStatus(400);
+ }
+ asUser.requestZip(req.session.uuid, req.body, (zipUuid) => {
+ console.log("Client can start checking");
+ return res.json(zipUuid);
+ });
+});
+
+router.get("/raw", (req, res) => {
+ if (!req.query || !req.query.target) return res.sendStatus(404);
+ const filePath = asUser.getFilePath(req.session.uuid, req.query.target);
+ if (!filePath) return res.sendStatus(404);
+ res.sendFile(filePath);
+});
+
+router.post("/public", authMiddleware, async (req, res) => {
+ if (!req.body || !(req.body instanceof Array)) {
+ return res.sendStatus(400);
+ }
+ const failed = asUser.publicfyFiles(req.session.uuid, req.body);
+ if (!failed) return res.sendStatus(200);
+ res.status(500).json(failed);
+});
+
+module.exports = router;
diff --git a/src/server.js b/src/server.js
new file mode 100644
index 0000000..db95860
--- /dev/null
+++ b/src/server.js
@@ -0,0 +1,39 @@
+//Imports
+const express = require("express");
+const session = require("express-session");
+const bodyParser = require("body-parser");
+const secret = require("uuid-with-v6").v6;
+//Local Imports
+const { Web, StatusCode, Server } = require("./config.json");
+//Import Routers
+const stashRouter = require("./routes/stash");
+const storage = require("./api/storage");
+//Define Constants & Setup Database
+const app = express();
+const port = Server.Port;
+const debuggingMode = Server.Debug;
+const viewOptions = { beautify: false };
+
+//Set up Express session and body parsing
+app.use(session({ secret: secret(), saveUninitialized: false, resave: false }));
+app.use(bodyParser.json({ limit: Server.BodyLimit })); // parse application/json
+app.use(bodyParser.urlencoded({ limit: Server.BodyLimit, extended: false })); // parse application/x-www-form-urlencoded
+//Mount Routers
+app.use("/api/stash", stashRouter);
+const startServer = () => {
+ const server = app.listen(port, () => {
+ console.log("Node version: " + process.versions.node);
+ console.log(`Duneserver listening on port ${port}!`);
+ });
+ server.timeout = 10 * 60 * 1000;
+ server.on("connection", (socket) => {
+ // 10 minute timeout
+ socket.setTimeout(10 * 60 * 1000);
+ });
+ process.on("SIGINT", () => {
+ console.log("Received Shutdown Signal!");
+ process.exit();
+ });
+ setInterval(() => storage.cleanZips(), Server.ZipRemovalInterval);
+};
+startServer();
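
The splitter lookup in #queryStorageTokenSplitter is how each letter of a key "sinks" into a storage bucket. A minimal standalone sketch of that lookup, assuming a tokenList of a-z and tokenSplitters of ["a", "m"] (the values used in the "EX" doc comment; the real values come from the pointer file):

// Standalone sketch of the splitter lookup; tokenList and tokenSplitters here
// are illustrative assumptions matching the "EX" doc comment in the diff above.
const tokenList = "abcdefghijklmnopqrstuvwxyz".split("");
const tokenSplitters = ["a", "m"];

// Mirrors #queryStorageTokenSplitter: find the first splitter at or past the
// queried letter, then step back one so the letter falls into the bucket that
// envelops it.
function queryStorageTokenSplitter(query) {
  const queryIndex = tokenList.indexOf(query);
  if (queryIndex === -1) return tokenSplitters[0];
  for (let s = 0; s < tokenSplitters.length; s++) {
    if (tokenList.indexOf(tokenSplitters[s]) >= queryIndex)
      return tokenSplitters[Math.max(0, s - 1)];
  }
  return tokenSplitters[tokenSplitters.length - 1];
}

console.log(queryStorageTokenSplitter("c")); // "a" -- "c" falls between "a" and "m"
console.log(queryStorageTokenSplitter("z")); // "m" -- past the last splitter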
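
Both server.js and routes/stash.js read src/config.json, which is not part of this commit. A sketch of the shape they appear to expect, based only on the keys referenced in the code above; every value is a placeholder assumption, and the actual file would be the plain JSON equivalent of this object:

// Hypothetical shape of src/config.json -- key names come from server.js and
// routes/stash.js; all values below are placeholder assumptions.
module.exports = {
  Web: {}, // destructured in server.js but unused in the code shown
  StatusCode: {}, // destructured in server.js but unused in the code shown
  Server: {
    Port: 8080, // placeholder: passed to app.listen(port)
    Debug: false, // placeholder: read into debuggingMode
    BodyLimit: "50mb", // placeholder: body-parser "limit" option
    jwtHeader: "authorization", // placeholder: header forwarded to the auth server
    authServer: "https://auth.example.com/verify", // placeholder URL checked by authMiddleware
    ZipRemovalInterval: 5 * 60 * 1000, // placeholder: ms between storage.cleanZips() runs
  },
};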
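
A minimal client sketch against the /api/stash routes above, assuming the server listens on localhost:8080, that config.Server.jwtHeader is "authorization", and that a token accepted by the configured auth server is available; the shape of the entries returned by /files depends on ../api/user, which is not shown:

// Hypothetical client usage of the stash routes; the base URL, header name,
// and token are assumptions -- only the route paths and the JSON-array body
// requirement come from the router above.
const axios = require("axios");

const stash = axios.create({
  baseURL: "http://localhost:8080/api/stash",
  headers: { authorization: "Bearer <token>" }, // name must match config.Server.jwtHeader
});

async function demo() {
  // GET /files returns the caller's owned files as JSON
  const { data: files } = await stash.get("/files");
  console.log(files);
  // POST /public and POST /delete expect a JSON array body (400 otherwise)
  await stash.post("/public", []);
  await stash.post("/delete", []);
}

demo().catch((err) => console.error(err.response?.status || err.message));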