Commit

Initial commit

pabloromeo committed Mar 20, 2024
0 parents commit ee1793b
Showing 18 changed files with 1,905 additions and 0 deletions.
74 changes: 74 additions & 0 deletions .github/workflows/build.yml
@@ -0,0 +1,74 @@
# This is a basic workflow to help you get started with Actions

name: Build Images

# Controls when the workflow will run
on:
  workflow_dispatch:
  push:
    branches:
      - "main"
      - "dev"
    tags:
      - "v*.*.*"
  pull_request:
    branches:
      - "main"
      - "dev"

permissions:
  contents: read
  packages: write

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  build-image:
    runs-on: ubuntu-latest
    steps:
      # Get the repository's code
      - name: Checkout
        uses: actions/checkout@v3

      # https://github.com/docker/setup-qemu-action
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      # https://github.com/docker/setup-buildx-action
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Docker meta for image
        id: meta
        uses: docker/metadata-action@v4
        with:
          # list of Docker images to use as base name for tags
          images: |
            ghcr.io/cloudx-labs/scrapex
          # generate Docker tags based on the following events/attributes
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha

      - name: Build and push image
        uses: docker/build-push-action@v5
        with:
          context: ./scrapex
          file: ./scrapex/Dockerfile
          platforms: linux/amd64
          provenance: false
          cache-from: type=registry,ref=ghcr.io/cloudx-labs/scrapex:main
          cache-to: type=inline
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
130 changes: 130 additions & 0 deletions .gitignore
@@ -0,0 +1,130 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
21 changes: 21 additions & 0 deletions LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Cloud(x);

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
91 changes: 91 additions & 0 deletions README.md
@@ -0,0 +1,91 @@
# Scrapex

## Introduction

Scrapex is a versatile scraping component designed to efficiently extract content from URLs. Leveraging the power of Playwright and Chrome, it ensures seamless support for Single Page Applications (SPAs) and content dependent on JavaScript execution. Initially developed for internal use by our AI Agents, Scrapex offers robust functionality for a wide range of scraping needs.

## Features

- _Support for Multiple Output Formats_: Scrapex can output data in HTML, Markdown, or PDF formats, catering to diverse requirements.
- _Container Image Deployment_: For ease of deployment and scalability, Scrapex is fully compatible with container environments such as Docker or Kubernetes.
- _Customizable Settings_: Through environment variables, as well as parameters in the extraction call, users can tailor the behavior of Scrapex to suit their specific scraping tasks.

## Configuration

Scrapex supports the following output formats:

1. _HTML_: Direct extraction of HTML content.
2. _Markdown_: Conversion of HTML to Markdown using `html-to-md`.
3. _PDF_: Generation of PDF documents utilizing Playwright's PDF functionality.

### Environment Variables

Configure Scrapex using the following environment variables:

| Variable | Description | Default |
| -------------------- | ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------- |
| `PORT` | Port on which Node.js server listens | `3000` |
| `DEFAULT_WAIT` | Default milliseconds to wait on page load | `0` |
| `DEFAULT_USER_AGENT` | Default user agent for requests | `"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"` |
| `LOG_LEVEL` | Logging level (`debug`, `info`, `warn`, `error`) | `debug` |
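
As a quick illustration, here is a minimal sketch of setting these variables directly on the container (the values are examples, not recommendations):

```bash
# Values here are illustrative; the image tag matches the compose example below
docker run --rm \
  -e PORT=3000 \
  -e DEFAULT_WAIT=2000 \
  -e LOG_LEVEL=info \
  -p 3003:3000 \
  ghcr.io/cloudx-labs/scrapex:main
```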

## How to Run

The simplest way to run Scrapex is using Docker. Here's an example `docker-compose.yaml`:

```yaml
version: "3"
services:
  app:
    container_name: scrapex
    image: ghcr.io/cloudx-labs/scrapex:main # better to pin to a specific release version, such as v0.1
    environment:
      - TZ=America/Argentina/Buenos_Aires
      - PORT=3000
      - LOG_LEVEL=debug
    ports:
      - "3003:3000"
```
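
With that file in place, start the service using a standard Compose invocation:

```bash
docker compose up -d
```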

## Usage Example

To test Scrapex, you can send a request using curl as shown below:

```bash
curl --location 'http://localhost:3003/extract' \
--header 'Content-Type: application/json' \
--data '{
    "url": "https://en.wikipedia.org/wiki/Six_Degrees_of_Kevin_Bacon",
    "outputType": "pdf",
    "wait": 0,
    "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "settings": {
        "pdf": {
            "options": {
                "format": "A4"
            }
        }
    }
}'
```

### Payload Parameters

The following table describes the parameters included in the payload of the `curl` example:
| Parameter    | Description                               | Example                                                    |
| ------------ | ----------------------------------------- | ---------------------------------------------------------- |
| `url`        | URL of the page to scrape                 | `https://en.wikipedia.org/wiki/Six_Degrees_of_Kevin_Bacon` |
| `outputType` | Desired output format                     | `html` / `md` / `pdf`                                      |
| `wait`       | Milliseconds to wait before extraction    | `2000`                                                     |
| `userAgent`  | User agent to use for the request         | `Mozilla/5.0 (Windows NT 10.0; Win64; x64)...`             |
| `settings`   | Additional settings for output formatting | `{ "pdf": { "options": { "format": "A4" } } }`             |

### Settings per Extraction Type

#### PDF

All available values for `settings -> pdf -> options` can be found at: https://playwright.dev/docs/api/class-page#page-pdf
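
For example, here is a `settings` fragment requesting a landscape A4 PDF with backgrounds; the option names `landscape`, `printBackground`, and `margin` come from Playwright's `page.pdf()` documentation linked above:

```json
{
  "settings": {
    "pdf": {
      "options": {
        "format": "A4",
        "landscape": true,
        "printBackground": true,
        "margin": { "top": "1cm", "bottom": "1cm" }
      }
    }
  }
}
```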

#### Markdown (MD)

All available values for `settings -> md -> options` can be found at: https://github.com/stonehank/html-to-md/blob/master/README-EN.md
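
As a sketch, a Markdown extraction might pass html-to-md options like this (`ignoreTags` is taken from the html-to-md README; verify option names against the linked docs):

```bash
# "ignoreTags" is an html-to-md option per its README; confirm against the linked docs
curl --location 'http://localhost:3003/extract' \
--header 'Content-Type: application/json' \
--data '{
    "url": "https://en.wikipedia.org/wiki/Six_Degrees_of_Kevin_Bacon",
    "outputType": "md",
    "settings": {
        "md": {
            "options": {
                "ignoreTags": ["script", "style"]
            }
        }
    }
}'
```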
6 changes: 6 additions & 0 deletions scrapex/.dockerignore
@@ -0,0 +1,6 @@
**/node_modules
Dockerfile
docker-compose.yaml
.prettierrc
.eslintrc.json
.dockerignore
17 changes: 17 additions & 0 deletions scrapex/.eslintrc.json
@@ -0,0 +1,17 @@
{
  "env": {
    "node": true,
    "es2021": true
  },
  "extends": "eslint:recommended",
  "parserOptions": {
    "ecmaVersion": "latest",
    "sourceType": "module"
  },
  "plugins": ["import"],
  "rules": {
    "import/no-unresolved": 2,
    "import/no-commonjs": 2,
    "import/extensions": [2, "ignorePackages"]
  }
}
8 changes: 8 additions & 0 deletions scrapex/.prettierrc
@@ -0,0 +1,8 @@
{
  "semi": true,
  "trailingComma": "es5",
  "singleQuote": false,
  "printWidth": 120,
  "useTabs": true,
  "endOfLine": "auto"
}
25 changes: 25 additions & 0 deletions scrapex/Dockerfile
@@ -0,0 +1,25 @@
#FROM node:20-alpine
FROM mcr.microsoft.com/playwright:v1.42.1-jammy

ENV NODE_ENV=production

RUN apt-get update && apt-get install -y dumb-init \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /usr/src/app
COPY package*.json ./
RUN npm ci --only=production

# Add non-root user
RUN adduser --disabled-password --gecos '' appuser \
&& chown -R appuser:appuser /usr/src/app

# Switch to the non-root user
USER appuser

# Copy application source; --chown keeps the files owned by the non-root user
COPY --chown=appuser:appuser . .

ENTRYPOINT ["/usr/bin/dumb-init", "--"]

CMD ["node", "-r", "dotenv/config", "./src/server.js"]
11 changes: 11 additions & 0 deletions scrapex/docker-compose.yaml
@@ -0,0 +1,11 @@
version: "3"
services:
  app:
    container_name: scrapex
    build: .
    environment:
      - TZ=America/Argentina/Buenos_Aires
      - PORT=3000
      - LOG_LEVEL=debug
    ports:
      - "3003:3000"