cowidev.testing.incremental

cowidev.testing.incremental.africacdc

class cowidev.testing.incremental.africacdc.AfricaCDC[source]

Bases: CountryTestBase

_base_url = 'https://services8.arcgis.com/vWozsma9VzGndzx7/ArcGIS/rest/services/DailyCOVIDDashboard_5July21_1/FeatureServer/0/'
_parse_data(data) DataFrame[source]
_parse_date() str[source]
_parse_metrics(df: list) DataFrame[source]
columns_use: list = ['Country', 'Tests_Conducted']
date: str = None
export()[source]
increment_countries(df: DataFrame)[source]

Exports data to the relevant csv and logs the confirmation.

location: str = 'ACDC'
pipe_date(df: DataFrame) DataFrame[source]
pipe_filter_columns(df: DataFrame) DataFrame[source]
pipe_filter_entries(df: DataFrame) DataFrame[source]

Gets valid entries:

  • Countries not coming from OWID (avoid loop)

pipe_metadata(df: DataFrame) DataFrame[source]

Adds metadata to DataFrame

pipe_rename_countries(df: DataFrame) DataFrame[source]

Renames countries to match OWID naming convention.

pipeline(df: DataFrame)[source]

Pipeline for data

read() DataFrame[source]
rename_columns: dict = {'Country': 'location', 'Tests_Conducted': 'Cumulative total'}
source_label: str = 'Africa Centres for Disease Control and Prevention'
property source_url
property source_url_date
source_url_ref: str = 'https://africacdc.org/covid-19/'
units: str = 'tests performed'
cowidev.testing.incremental.africacdc.main()[source]
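
A minimal usage sketch (not part of the generated reference) for this multi-country collector, assuming the documented read()/pipeline()/increment_countries() flow; whether export() wraps these steps internally is not shown here:

    from cowidev.testing.incremental.africacdc import AfricaCDC, main

    # Documented module entry point: runs the scraper end to end.
    main()

    # Step-by-step inspection via the documented methods.
    scraper = AfricaCDC()
    df = scraper.read()              # rows from the ArcGIS FeatureServer endpoint
    df = scraper.pipeline(df)        # assumed to return the processed frame (no annotation shown)
    scraper.increment_countries(df)  # one CSV per country, with a logged confirmation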

cowidev.testing.incremental.albania

class cowidev.testing.incremental.albania.Albania[source]

Bases: object

_base_url = 'https://shendetesia.gov.al'

Extract link and date from relevant element.

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element in news feed.

_get_text_from_url(url: str) str[source]

Extract text from the url.

_num_max_pages = 3
_parse_data(soup: BeautifulSoup) tuple[source]

Get data from the source page.

_parse_date_from_element(elem: Tag) str[source]

Get date from relevant element.

Get link from relevant element.

_parse_metrics(text: str) int[source]

Get metrics from news text.

_url_subdirectory = 'category/lajme/page'
export()[source]
location = 'Albania'
notes = ''
read() Series[source]
regex = {'count': '(\\d+) testime nga cilat kanë rezultuar pozitivë me COVID19', 'date': '(\\d+\\/\\d+\\/\\d+)', 'title': 'COVID19/ Ministria e Shëndetësisë:'}
source_label = 'Ministry of Health and Social Protection'
units = 'tests performed'
cowidev.testing.incremental.albania.main()[source]
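
A sketch of the same incremental pattern for the object-based news-feed scrapers (Albania here; Fiji, Georgia, Iran and Russia below follow the same shape), assuming read() yields a single pandas Series for the latest bulletin, per its signature:

    from cowidev.testing.incremental.albania import Albania

    scraper = Albania()
    row = scraper.read()   # Series parsed from the latest post matching regex["title"]
    print(row)             # inspect the count and date before writing
    scraper.export()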

cowidev.testing.incremental.antigua_barbuda

class cowidev.testing.incremental.antigua_barbuda.AntiguaBarbuda[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(elem: Tag) str[source]

Parse date from element

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Antigua and Barbuda'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

source_label: str = 'Ministry of Health'
source_url: str = 'https://covid19.gov.ag'
source_url_ref: str = 'https://covid19.gov.ag'
units: str = 'tests performed'
cowidev.testing.incremental.antigua_barbuda.main()[source]
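
The CountryTestBase subclasses in this module and most that follow (Azerbaijan, Bahamas, Barbados, Belarus, Belize, ...) expose the same three public calls; a hedged sketch, with the internal soup parsing left to the class:

    from cowidev.testing.incremental.antigua_barbuda import AntiguaBarbuda

    scraper = AntiguaBarbuda()
    df = scraper.read()        # DataFrame scraped from source_url
    df = scraper.pipeline(df)  # date parsing and metadata columns
    scraper.export()           # "Export data to csv"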

cowidev.testing.incremental.azerbaijan

class cowidev.testing.incremental.azerbaijan.Azerbaijan[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Azerbaijan'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

source_label: str = 'Cabinet of Ministers of Azerbaijan'
source_url: str = 'https://koronavirusinfo.az/az/page/statistika/azerbaycanda-cari-veziyyet'
source_url_ref: str = 'https://koronavirusinfo.az/az/page/statistika/azerbaycanda-cari-veziyyet'
units: str = 'tests performed'
cowidev.testing.incremental.azerbaijan.main()[source]

cowidev.testing.incremental.bahamas

class cowidev.testing.incremental.bahamas.Bahamas[source]

Bases: CountryTestBase

_base_url: str = 'https://www.bahamas.gov.bs'
_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

Parse the article url from soup

_parse_metrics(table: DataFrame) int[source]

Parse metrics from table

_parse_pdf_table() DataFrame[source]

Parse pdf table from link

column_to_check: str = 'Total # of RT-'
export()[source]

Export data to csv

location: str = 'Bahamas'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'title': 'COVID-19 Report Update'}
source_label: str = 'Ministry of Health and Wellness'
source_url: str = 'https://www.bahamas.gov.bs/wps/portal/public/gov/government/news/!ut/p/b1/vZfJkqM4EIafpR6AsiQQgqNYbbMjwOCLwyvewAu2WZ5-XBPTMV3dPa45lI1ORGTGp_xzkdQb99LeuJzeNvn0sjmU0_3H_1ic8MB0KBUkRzJECgYxb_DYorwZwLtBdjcA__FR8NnfxEC8-_s29UkfeRbqjXppP7s22sHJDapM1tko9PXQnUehe92PiHixcwHpzV455OKSSxf2iQgj1TBGDm7ramRfJx1JS7nRrWbu7c6xGJ6yel0dCrnYggBXgCmRml0Pc7FE8z2Ty5tk8Ae7JcoQL1REy3S10QU85IOJ5-mic49bSy4wn825Zi2VYgPBPzE-COJRjBB_4S-A_6fxA8AX_qPe-OEWP3bwt8GjNH6VSLd_KJa97G5GfjKLEgAGENCQwQBILuxFvRQIE7Ztj4Nu14VbULdut3BZ5ADQxSDSUubOdFBdVOjocAh10Dq60EaRYTOwQ8y1_UUSxgpVOU-fnn8FeiiS70AVilaMAQPCs4Em9qS7ECTyKUbA9OCrgfxLJTV9Dzw9wk9FIwTfn8Nhb7yZFe_1vHgH75IsQkwkQZZEIAgS6SXbDBBNGtT6wJ-o1dWVTVwEYFNLRp4F62V8OcyZxbh9CH3On-rVVu-4mV4h61yPd7NAoZvT3rVnXRRKMvCahTkw2Db3g4Oe2lZnBH3BrXWzTgsaJ8Ar2hvHcd5YvRS5m17Xx2V_OYzLhJumHe5OS1_g-dt6tmvNOYG-k11QMuYWFc-z0aoKkRgZZxMKocFWWRpF81Vl-qfL2s3f3h4La1q_12oXoEivkaPXHdwmbqT5gRvvWndhQaYNWmAnAxZltRPfBe4GHbv8EJauHcZmj7tRNfCzgZ-bA1DyYuAfavXJQPHVwGdJGiggYLbMU0AxBoM-iggxYx5g_tURvrpKxVfnUPz-Kv00UXlZJhLEkiBBCUEo9ZJhVsjajtaGPl6F2ZJcyyVix62eVXRzoYBN4nJhmdwti87lSpkgz-ycW07VzbZfcGjYikEXzAJM6K3MjaE6XS5n_WmrHW3zZnt93DqTMY2Si4tnkkayUhGLaHE8-9N0WjoypzbqTVeVEkz44KupKJHfC5yEjrNVrPttooE7hBwtjd3IsJjsQcdO7mcQ6FgMHXcbWmybIeey-iHOPJ9W2Rd3FJN_NvDXqSi-GoifBTQ9J1A_JI2UD0n5fp-KNu_p8NXA78_hp47CAsRYhkgWEUEEko-OaolWDWrt_mgahkiCR5EMpuE1cFZqvhueA5aWh8WaobNU5te95zadt_S5DAf64OTOpjNxEtjtjpLY3NDriCUnLSnIfOvIKjuUOlj5zc7FrWKfV81yhfmDhZrQxEgvjfR8PRX16XStbC297BS8kEHa3GdGTO_ddSzim2XZYtivofTv4ovbH5b0s0n99heUoIvF/dl4/d5/L2dBISEvZ0FBIS9nQSEh/'
source_url_ref: str = None
units: str = 'tests performed'
cowidev.testing.incremental.bahamas.main()[source]

cowidev.testing.incremental.bahrain

class cowidev.testing.incremental.bahrain.Bahrain[source]

Bases: object

_parse_data()[source]
export()[source]
location: str = 'Bahrain'
notes: str = ''
source_label: str = 'Ministry of Health'
source_url: str = 'https://www.moh.gov.bh/COVID19'
testing_type: str = 'PCR only'
units: str = 'units unclear'
cowidev.testing.incremental.bahrain.main()[source]

cowidev.testing.incremental.bangladesh

class cowidev.testing.incremental.bangladesh.Bangladesh[source]

Bases: object

_parse_data()[source]
export()[source]
location: str = 'Bangladesh'
notes: str = ''
source_label: str = 'Government of Bangladesh'
source_url: str = 'https://dghs-dashboard.com/pages/covid19.php'
units: str = 'tests performed'
cowidev.testing.incremental.bangladesh.main()[source]

cowidev.testing.incremental.barbados

class cowidev.testing.incremental.barbados.Barbados[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(soup: BeautifulSoup) int[source]

Parse metrics from soup

export()[source]

Export data to csv

location: str = 'Barbados'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'count': 'has conducted (\\d+) tests', 'title': 'COVID-19 Update'}
source_label: str = 'Ministry of Health'
source_url: str = 'https://gisbarbados.gov.bb/top-stories/'
source_url_ref: str = None
units: str = 'tests performed'
cowidev.testing.incremental.barbados.main()[source]

cowidev.testing.incremental.belarus

class cowidev.testing.incremental.belarus.Belarus[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Belarus'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'date': '\\d{1,2}\\.\\d{1,2}\\.\\d{4}г.', 'element': 'ПРОВЕДЕНО ТЕСТОВ'}
source_label: str = 'Ministry of health'
source_url: str = 'https://stopcovid.belta.by/'
source_url_ref: str = 'https://stopcovid.belta.by/'
units: str = 'tests performed'
cowidev.testing.incremental.belarus.main()[source]

cowidev.testing.incremental.belize

class cowidev.testing.incremental.belize.Belize[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Belize'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'element': 'Tests Completed'}
source_label: str = 'Ministry of Health and Wellness'
source_url: str = 'https://sib.org.bz/covid-19/by-the-numbers/'
source_url_ref: str = 'https://sib.org.bz/covid-19/by-the-numbers/'
units: str = 'tests performed'
cowidev.testing.incremental.belize.main()[source]

cowidev.testing.incremental.benin

class cowidev.testing.incremental.benin.Benin[source]

Bases: CountryTestBase

export()[source]
location: str = 'Benin'
cowidev.testing.incremental.benin.main()[source]

cowidev.testing.incremental.bulgaria

class cowidev.testing.incremental.bulgaria.Bulgaria[source]

Bases: object

_parse_count(soup)[source]
_parse_data()[source]
export()[source]
location: str = 'Bulgaria'
source_label: str = 'Bulgaria COVID-19 Information Portal'

source_url: str = 'https://coronavirus.bg/bg/statistika'
units: str = 'tests performed'
cowidev.testing.incremental.bulgaria.main()[source]

cowidev.testing.incremental.cambodia

class cowidev.testing.incremental.cambodia.Cambodia[source]

Bases: CountryTestBase

export()[source]
location: str = 'Cambodia'
cowidev.testing.incremental.cambodia.main()[source]

cowidev.testing.incremental.cape_verde

class cowidev.testing.incremental.cape_verde.CapeVerde[source]

Bases: object

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element from the source page.

_get_text_from_url(url: str) str[source]

Extract text from the url.

_parse_data(soup: BeautifulSoup) dict[source]

Get data from the source page.

_parse_date(text: str) str[source]

Get date from relevant element.

Get link from relevant element.

_parse_metrics(text: str) int[source]

Get metrics from the text.

export()[source]

Export data to CSV.

location = 'Cape Verde'
read() Series[source]

Read data from source.

regex = {'count': '(?:total|totais) (?:de|dos|das) (\\d+) (?:resultados|amostras)', 'date': '(\\d+) (?:de )?(\\w+) de (20\\d+)'}
source_label = 'Government of Cape Verde'
source_url = 'https://covid19.cv/category/boletim-epidemiologico/'
units = 'tests performed'
cowidev.testing.incremental.cape_verde.main()[source]

cowidev.testing.incremental.croatia

class cowidev.testing.incremental.croatia.Croatia[source]

Bases: CountryTestBase

_get_relevant_element(driver: WebDriver) WebElement[source]

Get the relevant element

_get_text_from_element(elem: WebElement) str[source]

Extract text from the element.

_parse_data(driver: WebDriver) dict[source]

Get data from the source page.

_parse_date_from_text(text: str) str[source]

Get date from text.

_parse_metrics(text: str) int[source]

Get metrics from text.

export()[source]
location: str = 'Croatia'
read() Series[source]

Read data from source.

regex = {'count': 'Do danas je ukupno testirano ([\\d\\.]+) osoba', 'date': 'Objavljeno: ([\\d\\.]{10})'}
source_label: str = 'Government of Croatia'
source_url_ref: str = 'https://www.koronavirus.hr/najnovije/ukupno-dosad-382-zarazene-osobe-u-hrvatskoj/35'
units: str = 'people tested'
cowidev.testing.incremental.croatia.main()[source]
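
Croatia's helpers take a Selenium WebDriver, so a working driver is presumably required in the environment; the public surface is still read()/export():

    from cowidev.testing.incremental.croatia import Croatia

    scraper = Croatia()
    row = scraper.read()   # Series built from the rendered page (WebDriver-backed helpers)
    scraper.export()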

cowidev.testing.incremental.el_salvador

class cowidev.testing.incremental.el_salvador.ElSalvador[source]

Bases: CountryTestBase

_get_data_id_from_source(source_url: str) str[source]

Get Data ID from source

_load_data(data_id: str) DataFrame[source]

Load data from source

export()[source]

Export data to csv

location: str = 'El Salvador'
pipe_date(df: DataFrame) DataFrame[source]

Clean date

pipe_merge(df: DataFrame) DataFrame[source]
pipe_numeric(df: DataFrame) DataFrame[source]

Clean numeric columns

pipe_positive(df: DataFrame) DataFrame[source]
pipe_pr(df: DataFrame) DataFrame[source]

Calculate Positive Rate

pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'element': 'window\\.infographicData=({.*})', 'title': "\\'PRUEBAS REALIZADAS\\'\\, \\'CASOS POSITIVOS\\'"}
rename_columns: dict = {'CASOS POSITIVOS': 'positive', 'PRUEBAS REALIZADAS': 'Daily change in cumulative total'}
source_label: str = 'Government of El Salvador'
source_url: str = 'https://e.infogram.com/'
source_url_ref: str = 'https://covid19.gob.sv/'
units: str = 'tests performed'
cowidev.testing.incremental.el_salvador.main()[source]
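
A sketch for the Infogram-backed El Salvador scraper; the comment on pipeline() is an assumption based on the documented pipe_* steps:

    from cowidev.testing.incremental.el_salvador import ElSalvador

    scraper = ElSalvador()
    df = scraper.read()        # data located in the embedded Infogram payload via regex["element"]
    df = scraper.pipeline(df)  # presumably chains pipe_merge, pipe_numeric, pipe_date,
                               # pipe_positive and pipe_pr before export
    scraper.export()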

cowidev.testing.incremental.emro

class cowidev.testing.incremental.emro.EMRO[source]

Bases: CountryTestBase

_base_url: str = 'http://www.emro.who.int'
_parse_data(soup: BeautifulSoup) DataFrame[source]

Parses data from soup

_parse_date(df_list: list) str[source]

Parses date from DataFrame list

_parse_metrics(df_list: list) DataFrame[source]

Parses metrics from DataFrame list

_parse_pdf_table() list[source]

Parses pdf table

_parse_pdf_url(soup: BeautifulSoup) str[source]

Parses pdf url from soup

property area: list

Areas of pdf to be extracted

Returns:

[[y1, x1, y2, x2], …]

Return type:

list

For more info see: https://github.com/tabulapdf/tabula-java/wiki/Using-the-command-line-tabula-extractor-tool

columns_to_check: dict = {'date': 'Table 1: Epidemiological situation in the Eastern Mediterranean Region', 'tests': 'Total Tests'}
columns_use: list = ['Country', 'Total Tests']
date: str = None
export()[source]

Exports data to csv.

increment_countries(df: DataFrame)[source]

Exports data to the relevant csv and logs the confirmation.

location: str = 'EMRO'
pipe_filter_entries(df: DataFrame) DataFrame[source]

Gets valid entries:

  • Countries not coming from OWID (avoid loop)

pipe_metadata(df: DataFrame) DataFrame[source]

Adds metadata to DataFrame

pipe_rename_countries(df: DataFrame) DataFrame[source]

Renames countries to match OWID naming convention.

pipeline(df: DataFrame)[source]

Pipeline for data

read() DataFrame[source]

Reads data from source.

regex: dict = {'date': '(\\d{1,2} \\w+ 20\\d{2})'}
rename_columns: dict = {'Country': 'location', 'Total Tests': 'Cumulative total'}
source_label: str = 'WHO Regional Office for the Eastern Mediterranean'
source_url: str = 'http://www.emro.who.int/health-topics/corona-virus/situation-reports.html'
units: str = 'tests performed'
cowidev.testing.incremental.emro.main()[source]
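
Like AfricaCDC, EMRO increments several countries from one source, here a situation-report PDF parsed over the documented area; a hedged sketch:

    from cowidev.testing.incremental.emro import EMRO

    scraper = EMRO()
    print(scraper.area)              # [[y1, x1, y2, x2], ...] region(s) passed to the PDF extractor
    df = scraper.read()              # "Total Tests" table from the latest report
    df = scraper.pipeline(df)        # assumed to return the renamed/filtered frame
    scraper.increment_countries(df)  # per-country CSVs plus a logged confirmation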

cowidev.testing.incremental.equatorial_guinea

class cowidev.testing.incremental.equatorial_guinea.EquatorialGuinea[source]

Bases: CountryTestBase

export()[source]
location: str = 'Equatorial Guinea'
cowidev.testing.incremental.equatorial_guinea.main()[source]

cowidev.testing.incremental.faeroe_islands

class cowidev.testing.incremental.faeroe_islands.FaeroeIslands[source]

Bases: object

_parse_data() Series[source]
export()[source]
location: str = 'Faeroe Islands'
notes: str = ''
source_label: str = 'The Government of the Faeroe Islands'
source_url: str = 'https://corona.fo/json/stats'
source_url_ref: str = 'https://corona.fo/api'
units: str = 'people tested'
cowidev.testing.incremental.faeroe_islands.main()[source]

cowidev.testing.incremental.fiji

class cowidev.testing.incremental.fiji.Fiji[source]

Bases: object

__element = None
_get_list_of_elements(soup: BeautifulSoup) None[source]

Get the relevant elements list from the source page.

_get_relevant_element_and_year() tuple[source]

Get the relevant element and year from the element list.

_get_text_from_url(url: str) str[source]

Extract text from the url.

_num_max_pages = 3
_num_rows_per_page = 3
_parse_data(soup: BeautifulSoup) tuple[source]

Get data from the source page.

_parse_date_from_text(year: str, text: str) str[source]

Get date from relevant element.

Get link from relevant element.

_parse_metrics(text: str) int[source]

Get metrics from news text.

export()[source]

Export data to csv.

location = 'Fiji'
notes = ''
read() Series[source]

Read data from source.

regex = {'count': 'tests since 2020 are (\\d+,\\d+)', 'date': 'tests have been reported for (\\w+ \\d+)', 'title': 'COVID-19 Update', 'year': '\\d{4}'}
source_label = 'Fiji Ministry of Health & Medical Services'
source_url = 'https://www.health.gov.fj/page/'
units = 'tests performed'
cowidev.testing.incremental.fiji.main()[source]

cowidev.testing.incremental.georgia

class cowidev.testing.incremental.georgia.Georgia[source]

Bases: object

_base_url = 'https://agenda.ge'

Extract link and date from relevant element.

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element in news feed.

_get_text_from_url(url: str) str[source]

Extract text from the url.

_num_max_pages = 3
_parse_data(soup: BeautifulSoup) tuple[source]

Get data from the source page.

_parse_date_from_element(elem: Tag) str[source]

Get date from relevant element.

Get link from relevant element.

_parse_metrics(text: str) int[source]

Get metrics from news text.

_url_query_pt1 = 'https://agenda.ge/ajax/get-nodes?pageOptions%5Btag%5D=&pageOptions%5Byear%5D=2022&pageOptions%5Btype%5D=news&pageOptions%5Blang%5D=en&listOptions%5Byear%5D=all&listOptions%5Bmonth%5D=0&listOptions%5Bday%5D=0&listOptions%5Bpage%5D='
_url_query_pt2 = '&listOptions%5Bcount%5D=16&listOptions%5Brange%5D=all&listOptions%5Brows%5D=4&listOptions%5Bcolumns%5D=4&listOptions%5Brubric%5D=&listOptions%5Bcategory%5D='
export()[source]
location = 'Georgia'
notes = ''
read() Series[source]
regex = {'count': '(\\d+) tests .*? 24 hours', 'date': '(\\d+ \\w+ \\d+)', 'title': '(Georgia reports)|(coronavirus)'}
source_label = 'Government of Georgia'
units = 'tests performed'
cowidev.testing.incremental.georgia.main()[source]

cowidev.testing.incremental.gibraltar

class cowidev.testing.incremental.gibraltar.Gibraltar[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(text: str) str[source]

Parse date from text

_parse_metrics(text: str) int[source]

Parse metrics from text

export()[source]

Export data to csv

location: str = 'Gibraltar'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'count': 'Results received: ([\\d,]+)', 'date': 'valid as of (\\d{1,2})\\w+ (\\w+ 20\\d{2})'}
source_label: str = 'The Department of Public Health'
source_url: str = 'https://healthygibraltar.org/news/update-on-wuhan-coronavirus/'
source_url_ref: str = 'https://healthygibraltar.org/news/update-on-wuhan-coronavirus/'
units: str = 'tests performed'
cowidev.testing.incremental.gibraltar.main()[source]

cowidev.testing.incremental.greece

class cowidev.testing.incremental.greece.Greece[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Greece'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'date': 'elementor-element-5b9d061', 'element': 'elementor-element-9df72a6'}
source_label: str = 'National Organization of Public Health'
source_url: str = 'https://covid19.gov.gr/'
source_url_ref: str = 'https://covid19.gov.gr/'
units: str = 'samples tested'
cowidev.testing.incremental.greece.main()[source]

cowidev.testing.incremental.haiti

class cowidev.testing.incremental.haiti.Haiti[source]

Bases: CountryTestBase

_extract_text_from_url() str[source]

Extracts text from pdf.

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parses data from soup.

_parse_date(link: str) str[source]

Gets date from link.

_parse_metrics(text: str) DataFrame[source]

Parses metrics from data.

export()[source]

Exports data to CSV.

location: str = 'Haiti'
pipe_date(df: DataFrame) DataFrame[source]

Pipes date.

pipeline(df: DataFrame) DataFrame[source]

Pipeline for data.

read() DataFrame[source]

Reads data from source.

regex: dict = {'date': '(\\d{1,2}\\-\\d{1,2}\\-20\\d{2})', 'metrics': 'INDICATEURS ([\\d,]+)', 'title': 'surveillance du nouveau Coronavirus \\(COVID-19\\)'}
source_label: str = 'Ministry of Public Health and Population'
source_url: dict = 'https://www.mspp.gouv.ht/documentation/'
source_url_ref: str = None
units: str = 'tests performed'
cowidev.testing.incremental.haiti.main()[source]

cowidev.testing.incremental.iran

class cowidev.testing.incremental.iran.Iran[source]

Bases: object

_base_url = 'https://irangov.ir'

Extract link and date from relevant element.

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element in news feed.

_get_text_from_url(url: str) str[source]

Extract text from the url.

_num_max_pages = 3
_parse_data(soup: BeautifulSoup) tuple[source]

Get data from the source page.

_parse_date_from_element(elem: Tag) str[source]

Get date from relevant element.

Get link from relevant element.

_parse_metrics(text: str) int[source]

Get metrics from news text.

_url_subdirectory = 'ministry-of-health-and-medical-education'
export()[source]
location = 'Iran'
notes = ''
read() Series[source]
regex = {'count': '(\\d+) COVID-19 tests have been taken across the country so far', 'date': '(\\d+\\-\\d+\\-\\d+)', 'title': "Health Ministry's Updates on COVID-19"}
source_label = 'Ministry of Health and Medical Education'
units = 'tests performed'
cowidev.testing.incremental.iran.main()[source]

cowidev.testing.incremental.jordan

class cowidev.testing.incremental.jordan.Jordan[source]

Bases: CountryTestBase

_df_builder(count: str) DataFrame[source]

Builds dataframe from the text data

_request() dict[source]

Requests data from source.

_week_to_date(week: int) str[source]

Converts week to date.

export()[source]

Exports data to CSV.

property headers

Headers for the request

location: str = 'Jordan'
notes: str = ''
payload(week: str | None = None) dict[source]

Request payload

pipe_date(df: DataFrame) DataFrame[source]

Pipes date.

pipeline(df: DataFrame) DataFrame[source]

Pipeline for data.

read() DataFrame[source]

Reads the data from the source

source_label: str = 'Ministry of Health'
source_url: str = 'https://wabi-west-europe-d-primary-api.analysis.windows.net/public/reports/querydata?synchronous=true'
source_url_ref: str = 'https://corona.moh.gov.jo/ar'
units: str = 'tests performed'
week: str = 33
cowidev.testing.incremental.jordan.main()[source]
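
Jordan queries the public Power BI querydata endpoint; payload() and headers are documented, and the fallback to the class-level week attribute is an assumption:

    from cowidev.testing.incremental.jordan import Jordan

    scraper = Jordan()
    print(scraper.headers)             # request headers for source_url
    print(scraper.payload(week="33"))  # request body; None presumably falls back to Jordan.week

    df = scraper.read()                # weekly testing figures
    df = scraper.pipeline(df)          # pipe_date converts the week number to a date
    scraper.export()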

cowidev.testing.incremental.kosovo

class cowidev.testing.incremental.kosovo.Kosovo[source]

Bases: CountryTestBase

_parse_data(driver: WebDriver) DataFrame[source]

Parse data from source

_parse_date(date: str) str[source]

Parse date from soup

export()[source]

Export data to csv

location: str = 'Kosovo'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'count': 'Gjithsej testuar', 'date': 'Përditësimi i fundit:'}
source_label: str = 'National Institute of Public Health of Kosovo'
source_url: str = 'https://datastudio.google.com/embed/u/0/reporting/2e546d77-8f7b-4c35-8502-38533aa0e9e8/page/tI3oB'
source_url_ref: str = 'https://datastudio.google.com/embed/u/0/reporting/2e546d77-8f7b-4c35-8502-38533aa0e9e8'
units: str = 'tests performed'
cowidev.testing.incremental.kosovo.main()[source]

cowidev.testing.incremental.laos

class cowidev.testing.incremental.laos.Laos[source]

Bases: object

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Gets element from the soup.

_get_text_from_element(elem: Tag) str[source]

Gets text from element.

_parse_data(soup: BeautifulSoup) dict[source]

Gets data from the source page.

_parse_date(text: str) str[source]

Gets date from the text.

_parse_metrics(text: str) int[source]

Gets metrics from the text.

_source_url = 'https://www.covid19.gov.la/index.php'
export()[source]

Exports data to csv.

location = 'Laos'
notes = ''
read() Series[source]

Reads data from source.

regex = {'date': 'ຂໍ້ມູນ ເວລາ .*? (\\d+\\/\\d+\\/\\d+)', 'tests': 'ຮັບການກວດມື້ນີ້ (\\d+)'}
source_label = 'Ministry of Health'
units = 'tests performed'
cowidev.testing.incremental.laos.main()[source]

cowidev.testing.incremental.lebanon

class cowidev.testing.incremental.lebanon.Lebanon[source]

Bases: object

_parse_count(soup: str) str[source]
_parse_date(soup: str) str[source]
export()[source]
location = 'Lebanon'
notes = ''
read()[source]
regex = {'date': '([A-Za-z]+ \\d+)'}
source_label = 'Lebanon Ministry of Health'
source_url = 'https://corona.ministryinfo.gov.lb/'
units = 'tests performed'
cowidev.testing.incremental.lebanon.main()[source]

cowidev.testing.incremental.libya

class cowidev.testing.incremental.libya.Libya[source]

Bases: object

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element in soup.

_parse_data(soup: BeautifulSoup) dict[source]

Get data from the source page.

_parse_date_from_soup(soup: BeautifulSoup) str[source]

Get date from soup.

_parse_metrics(elem: Tag) int[source]

Get metrics from element.

export()[source]
location = 'Libya'
notes = ''
read() Series[source]

Read data from source.

regex = {'date': '(\\d+ \\/ \\d+ \\/ \\d+.)', 'samples': 'عدد العينات'}
source_label = 'Libya National Centre for Disease Control'
source_url = 'https://ncdc.org.ly/Ar'
units = 'samples tested'
cowidev.testing.incremental.libya.main()[source]

cowidev.testing.incremental.maldives

class cowidev.testing.incremental.maldives.Maldives[source]

Bases: CountryTestBase

_parse_data(data: dict) DataFrame[source]

Parse data.

export()[source]

Export data to CSV.

location: str = 'Maldives'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data.

read() DataFrame[source]

Read data from source.

regex: dict = {'date': '(\\d{1,2}\\/\\d{1,2}\\/20\\d{2})'}
source_label: str = 'Maldives Health Protection Agency'
source_url: str = 'https://covid19.health.gov.mv/v2_data.json'
source_url_ref: str = 'https://covid19.health.gov.mv/en'
units: str = 'samples tested'
cowidev.testing.incremental.maldives.main()[source]
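
The Maldives source is a plain JSON file, so the raw payload can also be fetched directly with requests for inspection; only read()/pipeline()/export() belong to the documented surface:

    import requests

    from cowidev.testing.incremental.maldives import Maldives

    scraper = Maldives()
    raw = requests.get(scraper.source_url).json()  # v2_data.json payload, inspection only

    df = scraper.read()
    df = scraper.pipeline(df)
    scraper.export()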

cowidev.testing.incremental.moldova

class cowidev.testing.incremental.moldova.Moldova[source]

Bases: object

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element in news feed.

_get_text_from_url(url: str) str[source]

Extract text from the url.

_parse_data(soup: BeautifulSoup) tuple[source]

Get data from the source page.

_parse_date_from_text(text: str) str[source]

Get date from text.

_parse_metrics(text: str) int[source]

Get metrics from news text.

export()[source]

Export data to CSV.

location = 'Moldova'
notes = ''
read() Series[source]

Read data from source.

regex = {'count': '((\\d+)) teste.|(\\d+) de teste', 'date': '(\\d+\\/\\d+\\/\\d+)', 'title': '(cazuri noi de COVID-19)|(cazuri de COVID-19)|(cazuri de COVID-19,)'}
source_label = 'Ministry of Health of the Republic of Moldova'
source_url = 'https://msmps.gov.md/media/comunicate/'
units = 'tests performed'
cowidev.testing.incremental.moldova.main()[source]

cowidev.testing.incremental.mongolia

class cowidev.testing.incremental.mongolia.Mongolia[source]

Bases: object

export()[source]
location = 'Mongolia'
notes = ''
read()[source]
source_label = 'Ministry of Health'
source_url = 'https://e-mongolia.mn/shared-api/api/covid-stat/daily'
source_url_ref = 'https://www1.e-mongolia.mn/covid-19'
units = 'samples tested'
cowidev.testing.incremental.mongolia.main()[source]

cowidev.testing.incremental.morocco

class cowidev.testing.incremental.morocco.Morocco[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(soup: BeautifulSoup) int[source]

Parse metrics from soup

export()[source]

Export data to csv

location: str = 'Morocco'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'count': '\\s+', 'date': '00 (\\d{1,2}\\-\\d{2}\\-20\\d{2})'}
source_label: str = 'Ministry of Health'
source_url: str = 'http://www.covidmaroc.ma/Pages/AccueilAR.aspx'
source_url_ref: str = 'http://www.covidmaroc.ma/Pages/AccueilAR.aspx'
units: str = 'people tested'
cowidev.testing.incremental.morocco.main()[source]

cowidev.testing.incremental.myanmar

class cowidev.testing.incremental.myanmar.Myanmar[source]

Bases: CountryTestBase

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

export()[source]

Export data to csv

location: str = 'Myanmar'
pipe_metrics(df: DataFrame) DataFrame[source]

Parse metrics from source

pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'date': 'as of (\\d{1,2}\\-\\d{1,2}\\-20\\d{2})'}
source_label: str = 'Ministry of Health'
source_url: str = 'https://services7.arcgis.com/AB2LoFxJT2bJUJYC/arcgis/rest/services/CaseCount_130720/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&outStatistics=%5B%7B%22statisticType%22%3A%22sum%22%2C%22onStatisticField%22%3A%22Tested%22%2C%22outStatisticFieldName%22%3A%22value%22%7D%5D&resultType=standard'
source_url_ref: str = 'https://mohs.gov.mm/Main/content/publication/2019-ncov'
units: str = 'samples tested'
cowidev.testing.incremental.myanmar.main()[source]

cowidev.testing.incremental.nepal

class cowidev.testing.incremental.nepal.Nepal[source]

Bases: CountryTestBase

_extract_text_from_url() str[source]

Extracts text from pdf.

_parse_data(links: dict) DataFrame[source]

Parses data from link.

_parse_date(link: str) str[source]

Get date from link.

_parse_metrics(text: str) DataFrame[source]

Parses metrics from data.

export()[source]

Exports data to CSV.

location: str = 'Nepal'
pipe_date(df: DataFrame) DataFrame[source]

Pipes date.

pipe_pr(df: DataFrame) DataFrame[source]

Calculate Positive Rate

pipeline(df: DataFrame) DataFrame[source]

Pipeline for data.

read() DataFrame[source]

Reads data from source.

regex: dict = {'date': '(\\d{1,2}\\-\\d{1,2}\\-20\\d{2})', 'metrics': 'PCR \\| Antigen (\\d+) (\\d+) PCR \\| Antigen (\\d+) (\\d+)'}
source_label: str = 'Ministry of Health and Population'
source_url: dict = {'api': 'https://covid19.mohp.gov.np/covid/api/ministryrelease', 'base': 'https://covid19.mohp.gov.np/covid/englishSituationReport/'}
source_url_ref: str = None
units: str = 'samples tested'
cowidev.testing.incremental.nepal.main()[source]
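
Nepal resolves the latest situation-report PDF through the documented api/base URLs and derives a positive rate; a sketch:

    from cowidev.testing.incremental.nepal import Nepal

    scraper = Nepal()
    df = scraper.read()        # metrics extracted from the latest situation-report PDF
    df = scraper.pipeline(df)  # pipe_date and pipe_pr add the date and positive-rate columns
    scraper.export()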

cowidev.testing.incremental.new_zealand

class cowidev.testing.incremental.new_zealand.NewZealand[source]

Bases: object

_parse_date()[source]
_parse_metric()[source]
export()[source]
location = 'New Zealand'
notes = ''
read()[source]
source_label = 'Ministry of Health'
source_url = 'https://www.health.govt.nz/our-work/diseases-and-conditions/covid-19-novel-coronavirus/covid-19-data-and-statistics/covid-19-testing-data'
units = 'tests performed'
cowidev.testing.incremental.new_zealand.main()[source]

cowidev.testing.incremental.nicaragua

class cowidev.testing.incremental.nicaragua.Nicaragua[source]

Bases: CountryTestBase

_base_url: str = 'http://www.minsa.gob.ni/index.php/repository/func-download'
_extract_text_from_url(link) str[source]

Extracts text from pdf.

_get_download_url(soup: BeautifulSoup) str[source]
_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) Iterator[source]

Parses the date from the week number.

export()[source]
location: str = 'Nicaragua'
pipeline(df: DataFrame) DataFrame[source]
read() DataFrame[source]

Read data from source

regex: dict = {'title': 'Boletín Epidemiológico de la Semana No. '}
source_label: str = 'Ministry of Health'
source_url_ref: str = 'http://www.minsa.gob.ni/index.php/repository/Descargas-MINSA/COVID-19/Boletines-Epidemiol%C3%B3gico/Boletines-2022/'
units: str = 'tests performed'
cowidev.testing.incremental.nicaragua.main()[source]

cowidev.testing.incremental.north_macedonia

class cowidev.testing.incremental.north_macedonia.NorthMacedonia[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(soup: BeautifulSoup) int[source]

Parse metrics from soup

export()[source]

Export data to csv

location: str = 'North Macedonia'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'count': 'Досега во земјата се направени вкупно (\\d+)', 'date': '(\\d{1,2}.\\d{2}.\\d{4})'}
source_label: str = 'Ministry of Health'
source_url: str = 'https://koronavirus.gov.mk/vesti'
source_url_ref: str = 'https://koronavirus.gov.mk/vesti'
units: str = 'tests performed'
cowidev.testing.incremental.north_macedonia.main()[source]

cowidev.testing.incremental.pakistan

class cowidev.testing.incremental.pakistan.Pakistan[source]

Bases: object

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element from soup.

_parse_data(soup: BeautifulSoup) dict[source]

Get data from the source page.

_parse_date_from_soup(soup: BeautifulSoup) str[source]

Get date from soup.

_parse_metrics(elem: Tag) int[source]

Get metrics from element.

export()[source]

Export data to csv.

location = 'Pakistan'
notes = ''
read() Series[source]

Read data from source.

regex = {'count': 'Total Tests', 'date': '(\\d+ \\w+, \\d+)', 'header': 'Pakistan statistics '}
source_label = 'Government of Pakistan'
source_url = 'http://www.covid.gov.pk/'
units = 'tests performed'
cowidev.testing.incremental.pakistan.main()[source]

cowidev.testing.incremental.palau

class cowidev.testing.incremental.palau.Palau[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) tuple[source]

Gets pdf url

_parse_metrics(text: str) tuple[source]

Get metrics from report text.

export()[source]
location: str = 'Palau'
read() Series[source]
regex: dict = {'count': '((\\d+),(\\d+))COVID-19 Testsconducted \\(since', 'date': '(\\d{1,2} \\w+ 20\\d{2})'}
source_label: str = 'Ministry of Health and Human Services'
source_url: str = 'http://www.palauhealth.org/'
source_url_ref: str = ''
units: str = 'tests performed'
cowidev.testing.incremental.palau.main()[source]

cowidev.testing.incremental.papua_new_guinea

class cowidev.testing.incremental.papua_new_guinea.PapuaNewGuinea[source]

Bases: object

_parse_count(soup: str) str[source]
_parse_date(soup: str) str[source]
export()[source]
location = 'Papua New Guinea'
notes = ''
read()[source]
regex = {'date': '\\d{1,2}[a-z]{2} [A-Za-z]+ \\d{4}'}
source_label = 'Ministry of Health'
source_url = 'https://covid19.info.gov.pg/'
source_url_ref = 'https://covid19.info.gov.pg/'
units = 'people tested'
cowidev.testing.incremental.papua_new_guinea.main()[source]

cowidev.testing.incremental.paraguay

class cowidev.testing.incremental.paraguay.Paraguay[source]

Bases: CountryTestBase

_parse_data(url: str) DataFrame[source]

Parse data from url

_parse_date(t_scraper: TableauScraper) str[source]

Parse date from TableauScraper

_parse_metrics(t_scraper: TableauScraper) int[source]

Parse metrics from TableauScraper

export()[source]

Export data to csv

location: str = 'Paraguay'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

source_label: str = 'Ministry of Public Health and Social Welfare'
source_url: str = 'https://public.tableau.com/views/COVID-19PYTableauPublic/COVID-19Prensa'
source_url_ref: str = 'https://www.mspbs.gov.py/reporte-covid19.html'
units: str = 'tests performed'
cowidev.testing.incremental.paraguay.main()[source]
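
Paraguay scrapes a public Tableau dashboard; the internal helpers take a TableauScraper instance, so the tableauscraper package is presumably required:

    from cowidev.testing.incremental.paraguay import Paraguay

    scraper = Paraguay()
    df = scraper.read()        # date and cumulative total pulled from the Tableau workbook
    df = scraper.pipeline(df)
    scraper.export()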

cowidev.testing.incremental.russia

class cowidev.testing.incremental.russia.Russia[source]

Bases: object

_base_url = 'https://rospotrebnadzor.ru'

Get link from relevant element.

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element in news feed.

_get_text_and_date_from_url(url: str) tuple[source]

Extract text from the url.

_num_max_pages = 3
_parse_data(soup: BeautifulSoup) tuple[source]

Get data from the source page.

_parse_date(soup: BeautifulSoup) str[source]

Get date from relevant element.

_parse_metrics(text: str) int[source]

Get metrics from news text.

_url_subdirectory = '/about/info/news/?PAGEN_1='
export()[source]
location = 'Russia'
notes = ''
read() Series[source]
regex = {'count': 'проведено (\\d+).* исследовани', 'date': '(\\d+ \\d+ \\d+)', 'title': 'Информационный бюллетень о ситуации'}
source_label = 'Government of the Russian Federation'
units = 'tests performed'
cowidev.testing.incremental.russia.main()[source]

cowidev.testing.incremental.saint_kitts_nevis

class cowidev.testing.incremental.saint_kitts_nevis.SaintKittsNevis[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Saint Kitts and Nevis'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'element': 'No. of Persons Tested'}
source_label: str = 'Ministry of Health'
source_url: str = 'https://covid19.gov.kn/src/stats2/'
source_url_ref: str = 'https://covid19.gov.kn/src/stats2/'
units: str = 'people tested'
cowidev.testing.incremental.saint_kitts_nevis.main()[source]

cowidev.testing.incremental.saint_lucia

class cowidev.testing.incremental.saint_lucia.SaintLucia[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(soup: BeautifulSoup) int[source]

Parse metrics from soup

export()[source]

Export data to csv

location: str = 'Saint Lucia'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'date': 'As of .*? (\\w+ \\d{1,2}, 20\\d{2})'}
source_label: str = 'Ministry of Health and Wellness'
source_url_ref: str = 'https://www.covid19response.lc/'
units: str = 'tests performed'
cowidev.testing.incremental.saint_lucia.main()[source]

cowidev.testing.incremental.saint_vincent_and_the_grenadines

class cowidev.testing.incremental.saint_vincent_and_the_grenadines.SaintVincentAndTheGrenadines[source]

Bases: CountryTestBase

_extract_text_from_pdf() str[source]

Extract text from pdf.

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup.

_parse_date(text: str) str[source]

Get date from text.

Parse link from soup.

_parse_metrics(text: str) DataFrame[source]

Parse metrics from data.

export()[source]

Export data to CSV.

location: str = 'Saint Vincent and the Grenadines'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data.

read() DataFrame[source]

Read data from source.

regex: dict = {'ag': 'today  Total Rapid Ag \\(.*?\\)  [\\d,]+  ([\\d,]+)', 'date': '(\\w+ \\d{1,2} 20\\d{2})', 'pcr': 'Total PCR Tests done  ([\\d,]+)', 'pdf': 'Please click for full details', 'title': 'COVID-19 Report'}
source_label: str = 'Ministry of Health, Wellness and the Environment'
source_url: dict = 'http://health.gov.vc/health/index.php/c'
source_url_ref: str = None
units: str = 'tests performed'
cowidev.testing.incremental.saint_vincent_and_the_grenadines.main()[source]

cowidev.testing.incremental.singapore

class cowidev.testing.incremental.singapore.Singapore[source]

Bases: CountryTestBase

_build_df(data: dict) DataFrame[source]
_load_last_date() str[source]

Loads the last date from the datafile.

_parse_data() list[source]
_read_art()[source]
_read_pcr()[source]
export()[source]

Exports data to csv.

location: str = 'Singapore'
read() DataFrame[source]

Reads data from source.

source_label: str = 'Ministry of Health Singapore'
source_url: str = 'https://www.moh.gov.sg/covid-19/statistics'
units: str = 'samples tested'
cowidev.testing.incremental.singapore.main()[source]
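
Singapore combines separate PCR and ART reads and, judging by _load_last_date(), presumably only appends dates newer than those already in the datafile:

    from cowidev.testing.incremental.singapore import Singapore

    scraper = Singapore()
    df = scraper.read()   # PCR and ART figures merged into one DataFrame
    scraper.export()      # assumed to append only rows newer than the stored last date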

cowidev.testing.incremental.suriname

class cowidev.testing.incremental.suriname.Suriname[source]

Bases: CountryTestBase

export()[source]

Export data to csv

location: str = 'Suriname'
pipe_pr(df: DataFrame) DataFrame[source]

Calculate Positive Rate

pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

source_label: str = 'Directorate National Security'
source_url: str = 'https://covid-19.sr/'
source_url_ref: str = 'https://covid-19.sr/'
units: str = 'tests performed'
cowidev.testing.incremental.suriname.main()[source]

cowidev.testing.incremental.sweden

class cowidev.testing.incremental.sweden.Sweden[source]

Bases: CountryTestBase

_build_df(data: dict) DataFrame[source]

Builds dataframe from data.

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Gets element from the soup.

_get_text_from_element(elem: Tag) str[source]

Gets text from element.

_get_week_num_from_element(elem: Tag) int[source]

Gets week number from element.

_load_last_date() str[source]

Loads the last date from the datafile.

_parse_data(soup: BeautifulSoup) list[source]

Gets data from the source page.

_parse_date(week_num) Iterator[source]

Parses the date from the week number.

_parse_metrics(text: str, week_num) int[source]

Gets metrics from the text.

export()[source]

Exports data to csv.

location: str = 'Sweden'
notes: str = ''
read() DataFrame[source]

Reads data from source.

regex = {'title': 'Antalet testade individer och genomförda test per', 'week': '[vV]ecka (\\d+)'}
source_label: str = 'Swedish Public Health Agency'
source_url: str = 'https://www.folkhalsomyndigheten.se/smittskydd-beredskap/utbrott/aktuella-utbrott/covid-19/statistik-och-analyser/antalet-testade-for-covid-19/'
units: str = 'tests performed'
cowidev.testing.incremental.sweden.main()[source]
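
Sweden publishes weekly totals; the scraper converts the reported week number to a date internally, and the incremental check against the last exported date is an assumption based on _load_last_date():

    from cowidev.testing.incremental.sweden import Sweden

    scraper = Sweden()
    df = scraper.read()   # one row per published week, dated from the week number
    scraper.export()      # presumably skips weeks already present in the datafile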

cowidev.testing.incremental.syria

class cowidev.testing.incremental.syria.Syria[source]

Bases: CountryTestBase

Extract link and date from relevant element.

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Parses pdf url from soup

_parse_data(soup: BeautifulSoup) tuple[source]

Parses data from soup

_parse_date_from_element(elem: Tag) str[source]

Get date from relevant element.

Get link from relevant element.

_parse_metrics(text: str) int[source]

Get metrics from report text.

_parse_pdf_url(pdf_url: str) str[source]

Parses pdf text

export()[source]

Export data to csv.

location: str = 'Syria'
read() Series[source]

Read data from source.

regex: dict = {'count': 'Total Test (\\d+) (\\d+)', 'date': '(\\d{1,2} \\w+ 20\\d{2})'}
source_label: str = 'WHO Syrian Arab Republic'
source_url: str = 'https://reliefweb.int/updates?advanced-search=%28C226%29_%28S1275%29_%28DT4642%29_%28T4595%29_%28F10%29'
units: str = 'tests performed'
cowidev.testing.incremental.syria.main()[source]

cowidev.testing.incremental.timor

class cowidev.testing.incremental.timor.TimorLeste[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Timor'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'date': '(\\w+ \\d{1,2}, \\d{4})', 'element': 'Komulativo Teste'}
source_label: str = 'Ministry of Health'
source_url: str = 'https://covid19.gov.tl/dashboard/'
source_url_ref: str = 'https://covid19.gov.tl/dashboard/'
units: str = 'tests performed'
cowidev.testing.incremental.timor.main()[source]

cowidev.testing.incremental.togo

class cowidev.testing.incremental.togo.Togo[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(soup: BeautifulSoup) int[source]

Parse metrics from soup

export()[source]

Export data to csv

location: str = 'Togo'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'count': 'Nombre total ', 'date': '(\\d{1,2} \\w+ 20\\d{2})'}
source_label: str = 'Ministry of Health'
source_url: str = 'https://covid19.gouv.tg/'
source_url_ref: str = 'https://covid19.gouv.tg/'
units: str = 'tests performed'
cowidev.testing.incremental.togo.main()[source]

cowidev.testing.incremental.tunisia

class cowidev.testing.incremental.tunisia.Tunisia[source]

Bases: CountryTestBase

_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(soup: BeautifulSoup) str[source]

Parse date from soup

_parse_metrics(elem: Tag) int[source]

Parse metrics from element

export()[source]

Export data to csv

location: str = 'Tunisia'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'date': 'Chiffres clés mis à jour le '}
source_label: str = 'Tunisia Ministry of Health'
source_url: str = 'https://onmne.tn'
source_url_ref: str = 'https://onmne.tn'
units: str = 'people tested'
cowidev.testing.incremental.tunisia.main()[source]

cowidev.testing.incremental.ukraine

class cowidev.testing.incremental.ukraine.Ukraine[source]

Bases: object

_get_relevant_element(soup: BeautifulSoup) Tag[source]

Get the relevant element in news feed.

_parse_data(soup: BeautifulSoup) tuple[source]

Get data from the source page.

_parse_date(soup: BeautifulSoup) str[source]

Gets date from the source page.

_parse_metrics(elem: Tag) int[source]

Gets metrics from the element.

export()[source]

Export data to csv.

location = 'Ukraine'
notes = ''
read() Series[source]

Read data from source.

regex = {'count': 'total of tests', 'date': 'Information as of (\\w+) (\\d{1,2})'}
source_label = 'Cabinet of Ministers of Ukraine'
source_url = 'https://covid19.gov.ua/en'
units = 'tests performed'
cowidev.testing.incremental.ukraine.main()[source]

cowidev.testing.incremental.vanuatu

class cowidev.testing.incremental.vanuatu.Vanuatu[source]

Bases: CountryTestBase

_base_url: str = 'https://covid19.gov.vu'
_parse_data(soup: BeautifulSoup) DataFrame[source]

Parse data from soup

_parse_date(tables: list) str[source]

Parse date from the list of tables

_parse_metrics(tables: list) int[source]

Parse metrics from the list of tables

_parse_pdf_tables() list[source]

Parse pdf tables from link

property area: list

Areas of pdf to be extracted

Returns:

[[y1, x1, y2, x2], …]

Return type:

list

For more info see: https://github.com/tabulapdf/tabula-java/wiki/Using-the-command-line-tabula-extractor-tool

export()[source]

Export data to csv

location: str = 'Vanuatu'
pipeline(df: DataFrame) DataFrame[source]

Pipeline for data processing

read() DataFrame[source]

Read data from source

regex: dict = {'date': '\\d{1,2}\\/\\d{2}\\/20\\d{2} - (\\d{1,2}\\/\\d{2}\\/20\\d{2})', 'title': 'Surveillance Report for Epi Week'}
source_label: str = 'Ministry of Health'
source_url: str = 'https://covid19.gov.vu/index.php/surveillance'
source_url_ref: str = None
units: str = 'people tested'
cowidev.testing.incremental.vanuatu.main()[source]
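
Vanuatu, like EMRO, reads tables out of a PDF with an explicit area; a sketch using the documented surface:

    from cowidev.testing.incremental.vanuatu import Vanuatu

    scraper = Vanuatu()
    print(scraper.area)        # [[y1, x1, y2, x2], ...] regions for the PDF tables
    df = scraper.read()        # latest surveillance-report tables
    df = scraper.pipeline(df)
    scraper.export()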

cowidev.testing.incremental.vietnam

class cowidev.testing.incremental.vietnam.Vietnam[source]

Bases: object

Get the relevant URL from the source page.

_get_text_from_url(url: str) str[source]

Extract text from URL.

_parse_data(soup: BeautifulSoup) dict[source]

Get data from the source page.

_parse_date_from_text(soup) str[source]

Get date from text.

_parse_metrics(text: str) int[source]

Get metrics from text.

base_url = 'https://covid19.gov.vn'
export()[source]

Export data to CSV.

location = 'Vietnam'
read() Series[source]

Read data from source.

regex = {'count': 'mẫu tương đương (\\d+)', 'date': '(\\d{2}\\-\\d{2}\\-\\d{4})', 'title': 'Ngày'}
source_label = 'Ministry of Health of Vietnam'
source_url = 'https://covid19.gov.vn/ban-tin-covid-19.htm'
units = 'people tested'
cowidev.testing.incremental.vietnam.main()[source]