Merge pull request #1788 from gloriousDan/fix-import

Fix import with recipe-scrapers

Commit cb59a6340d
@@ -9,6 +9,8 @@ from recipe_scrapers._utils import get_host_name, normalize_string
 
 from cookbook.helper import recipe_url_import as helper
 from cookbook.helper.scrapers.scrapers import text_scraper
+from recipe_scrapers import scrape_me
+from recipe_scrapers._exceptions import NoSchemaFoundInWildMode
 
 
 def get_recipe_from_source(text, url, request):
@@ -63,7 +65,14 @@ def get_recipe_from_source(text, url, request):
     html_data = []
     images = []
     text = unquote(text)
+    scrape = None
 
-    try:
-        parse_list.append(remove_graph(json.loads(text)))
-        if not url and 'url' in parse_list[0]:
+    if url:
+        try:
+            scrape = scrape_me(url_path=url, wild_mode=True)
+        except(NoSchemaFoundInWildMode):
+            pass
+    if not scrape:
+        try:
+            parse_list.append(remove_graph(json.loads(text)))
+            if not url and 'url' in parse_list[0]:
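Note on the hunk above: when a URL is supplied, the import now tries a direct wild-mode scrape first and only falls back to parsing the submitted text when recipe-scrapers finds no schema.org data. A minimal sketch of that first step, assuming the recipe-scrapers API this PR targets (scrape_me still accepts wild_mode, and NoSchemaFoundInWildMode signals a page without recipe markup):

    from recipe_scrapers import scrape_me
    from recipe_scrapers._exceptions import NoSchemaFoundInWildMode


    def scrape_url_first(url):
        # Try a direct wild-mode scrape: recipe-scrapers fetches the page
        # itself and parses any embedded schema.org Recipe markup, not only
        # hosts it knows by name.
        if not url:
            return None
        try:
            return scrape_me(url_path=url, wild_mode=True)
        except NoSchemaFoundInWildMode:
            # Page exists but carries no recipe schema; let the caller
            # fall back to the raw-text path.
            return None

If this returns None, the original json.loads/text path takes over, matching the new "if not scrape:" guard.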
@@ -114,7 +114,14 @@ def get_from_scraper(scrape, request):
     except Exception:
         pass
 
-    if source_url := scrape.url:
+    try:
+        source_url = scrape.canonical_url()
+    except Exception:
+        try:
+            source_url = scrape.url
+        except Exception:
+            pass
+    if source_url:
         recipe_json['source_url'] = source_url
         try:
             keywords.append(source_url.replace('http://', '').replace('https://', '').split('/')[0])
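The rewritten lookup prefers scrape.canonical_url() and only then falls back to scrape.url, with both wrapped in try/except because either can raise on schema-less input. A hypothetical illustration of why the canonical address wins (the URL is invented for the example):

    scraper = scrape_me(url_path='https://example.com/recipe?utm_source=share',
                        wild_mode=True)
    scraper.url              # 'https://example.com/recipe?utm_source=share'
    scraper.canonical_url()  # 'https://example.com/recipe', taken from the
                             # page's canonical link / schema.org data

recipe_json['source_url'] then stores the clean address rather than the share link.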
@@ -129,9 +136,11 @@ def get_from_scraper(scrape, request):
     ingredient_parser = IngredientParser(request, True)
 
     recipe_json['steps'] = []
-    for i in parse_instructions(scrape.instructions()):
-        recipe_json['steps'].append({'instruction': i, 'ingredients': [], })
-
+    try:
+        for i in parse_instructions(scrape.instructions()):
+            recipe_json['steps'].append({'instruction': i, 'ingredients': [], })
+    except Exception:
+        pass
     if len(recipe_json['steps']) == 0:
         recipe_json['steps'].append({'instruction': '', 'ingredients': [], })
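Wrapping the step loop keeps the import alive when scrape.instructions() raises on pages without usable instruction data, and the length check guarantees at least one (possibly empty) step. parse_instructions is Tandoor's own helper; for context, a hypothetical stand-in assuming only that instructions() returns one newline-separated string:

    def parse_instructions_sketch(instructions: str) -> list[str]:
        # recipe-scrapers joins the steps with newlines; split them back
        # into one cleaned instruction string per step.
        return [line.strip() for line in instructions.split('\n') if line.strip()]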
@@ -1,6 +1,6 @@
 from bs4 import BeautifulSoup
 from json import JSONDecodeError
-from recipe_scrapers import SCRAPERS, get_host_name
+from recipe_scrapers import SCRAPERS
 from recipe_scrapers._factory import SchemaScraperFactory
 from recipe_scrapers._schemaorg import SchemaOrg
 
@@ -15,12 +15,6 @@ SCRAPERS.update(CUSTOM_SCRAPERS)
 
 
 def text_scraper(text, url=None):
-    domain = None
-    if url:
-        domain = get_host_name(url)
-    if domain in SCRAPERS:
-        scraper_class = SCRAPERS[domain]
-    else:
-        scraper_class = SchemaScraperFactory.SchemaScraper
+    scraper_class = SchemaScraperFactory.SchemaScraper
 
 class TextScraper(scraper_class):
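The second file is evidently cookbook/helper/scrapers/scrapers.py (the first file imports text_scraper from it). With the hostname dispatch removed, host-specific scrapers are only reached through scrape_me above; everything handed to text_scraper as raw HTML goes through the generic schema.org scraper. A plausible reduction of the TextScraper idea, assuming the recipe-scrapers internals imported in this file; the attribute setup is an assumption, not Tandoor's verbatim class:

    from bs4 import BeautifulSoup
    from recipe_scrapers._factory import SchemaScraperFactory
    from recipe_scrapers._schemaorg import SchemaOrg


    class TextScraperSketch(SchemaScraperFactory.SchemaScraper):
        # Feed pre-downloaded HTML to the generic schema.org scraper
        # instead of letting it fetch the URL itself.
        def __init__(self, page_data, url=None):
            # Skip the parent __init__ on purpose so no HTTP request is
            # made; set only the attributes the scraper methods rely on.
            self.soup = BeautifulSoup(page_data, 'html.parser')
            self.url = url
            self.schema = SchemaOrg(page_data)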