commit 707d862e01a7497a1f22879d314b865a35e0e85b
Author: smilerz <smilerz@gmail.com>
Date:   Wed Apr 14 10:35:00 2021 -0500

    works now

commit 3942a445ed4f2ccec57de25eacd86ea4e4dd6bdb
Author: smilerz <smilerz@gmail.com>
Date:   Wed Apr 14 10:25:24 2021 -0500

    updated serializer and api

commit 10dc746eb175c7f805a8a8ffa7ce49977a7ce97e
Author: smilerz <smilerz@gmail.com>
Date:   Wed Apr 14 10:20:19 2021 -0500

    fixed bookmarklet

commit 9779104902d3be0258c95cd2eeebcba0d5d48892
Merge: bb8262c 0cb3928
Author: smilerz <smilerz@gmail.com>
Date:   Wed Apr 14 09:56:27 2021 -0500

    Merge branch 'bookmarklet' into json_import

commit 0cb39284bb835ffc6cfee3e4306aadc4a64a25be
Author: smilerz <smilerz@gmail.com>
Date:   Wed Apr 14 09:42:53 2021 -0500

    retrieve bookmarklet ID from get

commit e89e0218de684d40b2e2bfb6ba833891206c828e
Author: smilerz <smilerz@gmail.com>
Date:   Wed Apr 14 09:29:33 2021 -0500

    Revert "fixed broken tab"

    This reverts commit ca0a1aede3cc6cb3912bc1fe30c0aa22e3f481a6.

commit bb8262ccabb93c56fbc18c407d5a0653b8b3ca79
Merge: b1e73aa 35a7f62
Author: smilerz <smilerz@gmail.com>
Date:   Sun Apr 11 20:35:57 2021 -0500

    Merge branch 'main_fork' into json_import
192 lines
5.7 KiB
Python
import json
import re

from bs4 import BeautifulSoup
from bs4.element import Tag
from cookbook.helper import recipe_url_import as helper
from cookbook.helper.scrapers.scrapers import text_scraper
from json import JSONDecodeError
from recipe_scrapers._utils import get_host_name, normalize_string


def get_recipe_from_source(text, url, space):
    """Parse pasted JSON or HTML and return (recipe_json, recipe_tree, html_data, images)."""
    def build_node(k, v):
        # Build one display node for the recipe tree; nested dicts and lists
        # become child nodes, scalars become "key: value" leaves.
        if isinstance(v, dict):
            node = {
                'name': k,
                'value': k,
                'children': get_children_dict(v)
            }
        elif isinstance(v, list):
            node = {
                'name': k,
                'value': k,
                'children': get_children_list(v)
            }
        else:
            node = {
                'name': k + ": " + normalize_string(str(v)),
                'value': normalize_string(str(v))
            }
        return node

    def get_children_dict(children):
        kid_list = []
        for k, v in children.items():
            kid_list.append(build_node(k, v))
        return kid_list

    def get_children_list(children):
        kid_list = []
        for kid in children:
            if isinstance(kid, list):
                node = {
                    'name': "unknown list",
                    'value': "unknown list",
                    'children': get_children_list(kid)
                }
                kid_list.append(node)
            elif isinstance(kid, dict):
                for k, v in kid.items():
                    kid_list.append(build_node(k, v))
            else:
                kid_list.append({
                    'name': normalize_string(str(kid)),
                    'value': normalize_string(str(kid))
                })
        return kid_list
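    # Illustrative example (not in the original source): build_node turns a
    # scalar pair like ('recipeYield', 4) into
    #     {'name': 'recipeYield: 4', 'value': '4'}
    # and a nested dict like ('author', {'name': 'jane'}) into
    #     {'name': 'author', 'value': 'author',
    #      'children': [{'name': 'name: jane', 'value': 'jane'}]}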
    # placeholder recipe structure; replaced by the scraper result below
    recipe_json = {
        'name': '',
        'url': '',
        'description': '',
        'image': '',
        'keywords': [],
        'recipeIngredient': [],
        'recipeInstructions': '',
        'servings': '',
        'prepTime': '',
        'cookTime': ''
    }
    recipe_tree = []
    parse_list = []
    html_data = []
    images = []
    try:
        # the whole payload is already JSON (e.g. pasted ld+json)
        parse_list.append(remove_graph(json.loads(text)))
        if not url and 'url' in parse_list[0]:
            url = parse_list[0]['url']
        scrape = text_scraper("<script type='application/ld+json'>" + text + "</script>", url=url)

    except JSONDecodeError:
        # otherwise treat the payload as HTML and collect its embedded JSON blocks
        soup = BeautifulSoup(text, "html.parser")
        html_data = get_from_html(soup)
        images += get_images_from_source(soup, url)
        for el in soup.find_all('script', type='application/ld+json'):
            el = remove_graph(el)
            if not url and 'url' in el:
                url = el['url']
            if isinstance(el, list):
                for le in el:
                    parse_list.append(le)
            elif isinstance(el, dict):
                parse_list.append(el)
        for el in soup.find_all(type='application/json'):
            el = remove_graph(el)
            if isinstance(el, list):
                for le in el:
                    parse_list.append(le)
            elif isinstance(el, dict):
                parse_list.append(el)
        scrape = text_scraper(text, url=url)

    recipe_json = helper.get_from_scraper(scrape, space)
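    # parse_list now holds the dicts and lists recovered from the JSON text
    # or from the page's embedded script blocks; the loop below renders each
    # entry into a tree of name/value nodes for display.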
    for el in parse_list:
        temp_tree = []
        if isinstance(el, Tag):
            try:
                el = json.loads(el.string)
            except TypeError:
                continue

        # render each top-level key with the same node builder used above
        for k, v in el.items():
            temp_tree.append(build_node(k, v))

        if '@type' in el and el['@type'] == 'Recipe':
            recipe_tree.append({'name': 'ld+json', 'children': temp_tree})
        else:
            recipe_tree.append({'name': 'json', 'children': temp_tree})

    return recipe_json, recipe_tree, html_data, images

def get_from_html(soup):
    # collect the visible text nodes of the page
    INVISIBLE_ELEMS = ('style', 'script', 'head', 'title')
    html = []
    for s in soup.strings:
        if s.parent.name not in INVISIBLE_ELEMS and len(s.strip()) > 0:
            html.append(s)
    return html

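# Illustrative example (not in the original source):
#   soup = BeautifulSoup('<head><title>x</title></head><p>Hello</p>', 'html.parser')
#   get_from_html(soup)  # -> ['Hello']
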
def get_images_from_source(soup, url):
    # gather candidate image URLs from the common source attributes
    sources = ['src', 'srcset', 'data-src']
    images = []
    img_tags = soup.find_all('img')
    if url:
        site = get_host_name(url)
        prot = url.split(':')[0]

    urls = []
    for img in img_tags:
        for src in sources:
            try:
                urls.append(img[src])
            except KeyError:
                pass

    for u in urls:
        u = u.split('?')[0]
        filename = re.search(r'/([\w_-]+[.](jpg|jpeg|gif|png))$', u)
        if filename:
            # sometimes an image source is relative;
            # if it is, prepend the base url
            if 'http' not in u and url:
                u = '{}://{}{}'.format(prot, site, u)
            if 'http' in u:
                images.append(u)
    return images

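# Illustrative example (not in the original source): with
# url = 'https://example.com/recipe' and an <img src="/media/pic.jpg"> tag,
# the relative source is expanded to 'https://example.com/media/pic.jpg'.
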
def remove_graph(el):
    # a recipe might be wrapped in a @graph list; unwrap the Recipe entry
    if isinstance(el, Tag):
        try:
            el = json.loads(el.string)
            if '@graph' in el:
                for x in el['@graph']:
                    if '@type' in x and x['@type'] == 'Recipe':
                        el = x
        except TypeError:
            pass
    return el
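
A minimal usage sketch, not part of the file above: the HTML snippet and URL are made up, and `space` stands in for the tenant object the real project passes to the importer.

    html = (
        '<html><head><script type="application/ld+json">'
        '{"@context": "https://schema.org", "@type": "Recipe",'
        ' "name": "Tomato Soup", "recipeIngredient": ["2 tomatoes"]}'
        '</script></head>'
        '<body><img src="/media/soup.jpg"></body></html>'
    )

    # space: placeholder for the active Space instance used by the importer
    recipe_json, recipe_tree, html_data, images = get_recipe_from_source(
        html, 'https://example.com/soup', space
    )
    # recipe_json: fields extracted by helper.get_from_scraper
    # recipe_tree: [{'name': 'ld+json', 'children': [...]}] for the script block
    # html_data:   visible text strings from the page
    # images:      ['https://example.com/media/soup.jpg']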