Column schema:

| column | type |
|---|---|
| partition | string (3 classes) |
| func_name | string (1–134 chars) |
| docstring | string (1–46.9k chars) |
| path | string (4–223 chars) |
| original_string | string (75–104k chars) |
| code | string (75–104k chars) |
| docstring_tokens | list (1–1.97k items) |
| repo | string (7–55 chars) |
| language | string (1 class) |
| url | string (87–315 chars) |
| code_tokens | list (19–28.4k items) |
| sha | string (40 chars) |
test
|
error_parsing
|
Print any parsing error and exit with status -1
|
mongotail/err.py
|
def error_parsing(msg="unknown options"):
"""
Print any parsing error and exit with status -1
"""
sys.stderr.write("Error parsing command line: %s\ntry 'mongotail --help' for more information\n" % msg)
sys.stderr.flush()
exit(EINVAL)
|
def error_parsing(msg="unknown options"):
"""
Print any parsing error and exit with status -1
"""
sys.stderr.write("Error parsing command line: %s\ntry 'mongotail --help' for more information\n" % msg)
sys.stderr.flush()
exit(EINVAL)
|
[
"Print",
"any",
"parsing",
"error",
"and",
"exit",
"with",
"status",
"-",
"1"
] |
mrsarm/mongotail
|
python
|
https://github.com/mrsarm/mongotail/blob/82ba74e32eff92faa320833a8d19c58555f9cd49/mongotail/err.py#L42-L48
|
[
"def",
"error_parsing",
"(",
"msg",
"=",
"\"unknown options\"",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Error parsing command line: %s\\ntry 'mongotail --help' for more information\\n\"",
"%",
"msg",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"exit",
"(",
"EINVAL",
")"
] |
82ba74e32eff92faa320833a8d19c58555f9cd49
|
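The excerpt above relies on imports defined elsewhere in err.py; a minimal sketch of that assumed context (not part of the record):

```python
# Assumed context for error_parsing above; module-level imports in err.py.
import sys
from errno import EINVAL  # exit status used (22 on POSIX, not the literal -1 the docstring mentions)
```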
test
|
Menu.get_product_by_name
|
Gets an Item from the Menu by name. Note that the name is not
case-sensitive but must be spelt correctly.
:param string name: The name of the item.
:raises StopIteration: Raises exception if no item is found.
:return: An item object matching the search.
:rtype: Item
|
dominos/models.py
|
def get_product_by_name(self, name):
'''
Gets an Item from the Menu by name. Note that the name is not
case-sensitive but must be spelt correctly.
:param string name: The name of the item.
:raises StopIteration: Raises exception if no item is found.
:return: An item object matching the search.
:rtype: Item
'''
return next(i for i in self.items if i.name.lower() == name.lower())
|
def get_product_by_name(self, name):
'''
Gets an Item from the Menu by name. Note that the name is not
case-sensitive but must be spelt correctly.
:param string name: The name of the item.
:raises StopIteration: Raises exception if no item is found.
:return: An item object matching the search.
:rtype: Item
'''
return next(i for i in self.items if i.name.lower() == name.lower())
|
[
"Gets",
"a",
"Item",
"from",
"the",
"Menu",
"by",
"name",
".",
"Note",
"that",
"the",
"name",
"is",
"not",
"case",
"-",
"sensitive",
"but",
"must",
"be",
"spelt",
"correctly",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/models.py#L59-L69
|
[
"def",
"get_product_by_name",
"(",
"self",
",",
"name",
")",
":",
"return",
"next",
"(",
"i",
"for",
"i",
"in",
"self",
".",
"items",
"if",
"i",
".",
"name",
".",
"lower",
"(",
")",
"==",
"name",
".",
"lower",
"(",
")",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
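A toy sketch of the lookup behaviour, using hypothetical stand-ins for `Item` objects (only `.name` matters here; the real classes are richer):

```python
from types import SimpleNamespace

# Hypothetical stand-ins for Item objects on a Menu.
items = [SimpleNamespace(name='Margherita'), SimpleNamespace(name='Pepperoni')]

# The same expression the method uses: case-insensitive, exact spelling required.
match = next(i for i in items if i.name.lower() == 'margherita'.lower())
print(match.name)  # -> Margherita
# A misspelt name such as 'Margarita' raises StopIteration, as documented.
```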
test
|
Client.new_session
|
Clear out the current session on the remote and set up a new one.
:return: A response from having expired the current session.
:rtype: requests.Response
|
dominos/api.py
|
def new_session(self, session):
'''
Clear out the current session on the remote and set up a new one.
:return: A response from having expired the current session.
:rtype: requests.Response
'''
response = self.__get('/Home/SessionExpire')
self.session = update_session_headers(session)
return response
|
def new_session(self, session):
'''
Clear out the current session on the remote and set up a new one.
:return: A response from having expired the current session.
:rtype: requests.Response
'''
response = self.__get('/Home/SessionExpire')
self.session = update_session_headers(session)
return response
|
[
"Clear",
"out",
"the",
"current",
"session",
"on",
"the",
"remote",
"and",
"setup",
"a",
"new",
"one",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L31-L41
|
[
"def",
"new_session",
"(",
"self",
",",
"session",
")",
":",
"response",
"=",
"self",
".",
"__get",
"(",
"'/Home/SessionExpire'",
")",
"self",
".",
"session",
"=",
"update_session_headers",
"(",
"session",
")",
"return",
"response"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.reset_store
|
Clears out the current store and gets a cookie. Sets the cross-site
request forgery token for each subsequent request.
:return: A response having cleared the current store.
:rtype: requests.Response
|
dominos/api.py
|
def reset_store(self):
'''
Clears out the current store and gets a cookie. Sets the cross-site
request forgery token for each subsequent request.
:return: A response having cleared the current store.
:rtype: requests.Response
'''
response = self.__get('/Store/Reset')
token = self.session.cookies['XSRF-TOKEN']
self.session.headers.update({'X-XSRF-TOKEN': token})
return response
|
def reset_store(self):
'''
Clears out the current store and gets a cookie. Sets the cross-site
request forgery token for each subsequent request.
:return: A response having cleared the current store.
:rtype: requests.Response
'''
response = self.__get('/Store/Reset')
token = self.session.cookies['XSRF-TOKEN']
self.session.headers.update({'X-XSRF-TOKEN': token})
return response
|
[
"Clears",
"out",
"the",
"current",
"store",
"and",
"gets",
"a",
"cookie",
".",
"Set",
"the",
"cross",
"site",
"request",
"forgery",
"token",
"for",
"each",
"subsequent",
"request",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L43-L56
|
[
"def",
"reset_store",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"__get",
"(",
"'/Store/Reset'",
")",
"token",
"=",
"self",
".",
"session",
".",
"cookies",
"[",
"'XSRF-TOKEN'",
"]",
"self",
".",
"session",
".",
"headers",
".",
"update",
"(",
"{",
"'X-XSRF-TOKEN'",
":",
"token",
"}",
")",
"return",
"response"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.get_stores
|
Search for dominos pizza stores using a search term.
:param string search_term: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list
|
dominos/api.py
|
def get_stores(self, search_term):
'''
Search for dominos pizza stores using a search term.
:param string search_term: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list
'''
params = {'SearchText': search_term}
response = self.__get('/storefindermap/storesearch', params=params)
return Stores(response.json())
|
def get_stores(self, search_term):
'''
Search for dominos pizza stores using a search term.
:param string search_term: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list
'''
params = {'SearchText': search_term}
response = self.__get('/storefindermap/storesearch', params=params)
return Stores(response.json())
|
[
"Search",
"for",
"dominos",
"pizza",
"stores",
"using",
"a",
"search",
"term",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L58-L69
|
[
"def",
"get_stores",
"(",
"self",
",",
"search_term",
")",
":",
"params",
"=",
"{",
"'SearchText'",
":",
"search_term",
"}",
"response",
"=",
"self",
".",
"__get",
"(",
"'/storefindermap/storesearch'",
",",
"params",
"=",
"params",
")",
"return",
"Stores",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.set_delivery_system
|
Set local cookies by initialising the delivery system on the remote.
Requires a store ID and a delivery postcode.
:param Store store: Store id.
:param string postcode: A postcode.
:return: A response having initialised the delivery system.
:rtype: requests.Response
|
dominos/api.py
|
def set_delivery_system(self, store, postcode, fulfilment_method=FULFILMENT_METHOD.DELIVERY):
'''
Set local cookies by initialising the delivery system on the remote.
Requires a store ID and a delivery postcode.
:param Store store: Store id.
:param string postcode: A postcode.
:return: A response having initialised the delivery system.
:rtype: requests.Response
'''
method = 'delivery' if fulfilment_method == FULFILMENT_METHOD.DELIVERY else 'collection'
params = {
'fulfilmentMethod': method,
'postcode': postcode,
'storeid': store.store_id
}
return self.__post('/Journey/Initialize', json=params)
|
def set_delivery_system(self, store, postcode, fulfilment_method=FULFILMENT_METHOD.DELIVERY):
'''
Set local cookies by initialising the delivery system on the remote.
Requires a store ID and a delivery postcode.
:param Store store: Store id.
:param string postcode: A postcode.
:return: A response having initialised the delivery system.
:rtype: requests.Response
'''
method = 'delivery' if fulfilment_method == FULFILMENT_METHOD.DELIVERY else 'collection'
params = {
'fulfilmentMethod': method,
'postcode': postcode,
'storeid': store.store_id
}
return self.__post('/Journey/Initialize', json=params)
|
[
"Set",
"local",
"cookies",
"by",
"initialising",
"the",
"delivery",
"system",
"on",
"the",
"remote",
".",
"Requires",
"a",
"store",
"ID",
"and",
"a",
"delivery",
"postcode",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L82-L100
|
[
"def",
"set_delivery_system",
"(",
"self",
",",
"store",
",",
"postcode",
",",
"fulfilment_method",
"=",
"FULFILMENT_METHOD",
".",
"DELIVERY",
")",
":",
"method",
"=",
"'delivery'",
"if",
"fulfilment_method",
"==",
"FULFILMENT_METHOD",
".",
"DELIVERY",
"else",
"'collection'",
"params",
"=",
"{",
"'fulfilmentMethod'",
":",
"method",
",",
"'postcode'",
":",
"postcode",
",",
"'storeid'",
":",
"store",
".",
"store_id",
"}",
"return",
"self",
".",
"__post",
"(",
"'/Journey/Initialize'",
",",
"json",
"=",
"params",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.get_menu
|
Retrieve the menu from the selected store.
:param Store store: A store.
:return: The store menu.
:rtype: Menu
|
dominos/api.py
|
def get_menu(self, store):
'''
Retrieve the menu from the selected store.
:param Store store: A store.
:return: The store menu.
:rtype: Menu
'''
params = {
'collectionOnly': not store.delivery_available,
'menuVersion': store.menu_version,
'storeId': store.store_id,
}
response = self.__get('/ProductCatalog/GetStoreCatalog', params=params)
return Menu(response.json())
|
def get_menu(self, store):
'''
Retrieve the menu from the selected store.
:param Store store: A store.
:return: The store menu.
:rtype: Menu
'''
params = {
'collectionOnly': not store.delivery_available,
'menuVersion': store.menu_version,
'storeId': store.store_id,
}
response = self.__get('/ProductCatalog/GetStoreCatalog', params=params)
return Menu(response.json())
|
[
"Retrieve",
"the",
"menu",
"from",
"the",
"selected",
"store",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L102-L117
|
[
"def",
"get_menu",
"(",
"self",
",",
"store",
")",
":",
"params",
"=",
"{",
"'collectionOnly'",
":",
"not",
"store",
".",
"delivery_available",
",",
"'menuVersion'",
":",
"store",
".",
"menu_version",
",",
"'storeId'",
":",
"store",
".",
"store_id",
",",
"}",
"response",
"=",
"self",
".",
"__get",
"(",
"'/ProductCatalog/GetStoreCatalog'",
",",
"params",
"=",
"params",
")",
"return",
"Menu",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.add_item_to_basket
|
Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param int quantity: The quantity of item to be added.
:return: A response having added an item to the current basket.
:rtype: requests.Response
|
dominos/api.py
|
def add_item_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
'''
Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param int quantity: The quantity of item to be added.
:return: A response having added an item to the current basket.
:rtype: requests.Response
'''
item_type = item.type
if item_type == 'Pizza':
return self.add_pizza_to_basket(item, variant, quantity)
elif item_type == 'Side':
return self.add_side_to_basket(item, quantity)
return None
|
def add_item_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
'''
Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param int quantity: The quantity of item to be added.
:return: A response having added an item to the current basket.
:rtype: requests.Response
'''
item_type = item.type
if item_type == 'Pizza':
return self.add_pizza_to_basket(item, variant, quantity)
elif item_type == 'Side':
return self.add_side_to_basket(item, quantity)
return None
|
[
"Add",
"an",
"item",
"to",
"the",
"current",
"basket",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L129-L145
|
[
"def",
"add_item_to_basket",
"(",
"self",
",",
"item",
",",
"variant",
"=",
"VARIANT",
".",
"MEDIUM",
",",
"quantity",
"=",
"1",
")",
":",
"item_type",
"=",
"item",
".",
"type",
"if",
"item_type",
"==",
"'Pizza'",
":",
"return",
"self",
".",
"add_pizza_to_basket",
"(",
"item",
",",
"variant",
",",
"quantity",
")",
"elif",
"item_type",
"==",
"'Side'",
":",
"return",
"self",
".",
"add_side_to_basket",
"(",
"item",
",",
"quantity",
")",
"return",
"None"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.add_pizza_to_basket
|
Add a pizza to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Some defaults are defined in the VARIANT enum.
:param int quantity: The quantity of pizza to be added.
:return: A response having added a pizza to the current basket.
:rtype: requests.Response
|
dominos/api.py
|
def add_pizza_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
'''
Add a pizza to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Some defaults are defined in the VARIANT enum.
:param int quantity: The quantity of pizza to be added.
:return: A response having added a pizza to the current basket.
:rtype: requests.Response
'''
item_variant = item[variant]
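# NOTE: .update() mutates the collection in place and returns None, so `ingredients` below is always None (apparent upstream bug).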
ingredients = item_variant['ingredients'].update([36, 42])
params = {
'stepId': 0,
'quantity': quantity,
'sizeId': variant,
'productId': item.item_id,
'ingredients': ingredients,
'productIdHalfTwo': 0,
'ingredientsHalfTwo': [],
'recipeReferrer': 0
}
return self.__post('/Basket/AddPizza', json=params)
|
def add_pizza_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
'''
Add a pizza to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Some defaults are defined in the VARIANT enum.
:param int quantity: The quantity of pizza to be added.
:return: A response having added a pizza to the current basket.
:rtype: requests.Response
'''
item_variant = item[variant]
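# NOTE: .update() mutates the collection in place and returns None, so `ingredients` below is always None (apparent upstream bug).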
ingredients = item_variant['ingredients'].update([36, 42])
params = {
'stepId': 0,
'quantity': quantity,
'sizeId': variant,
'productId': item.item_id,
'ingredients': ingredients,
'productIdHalfTwo': 0,
'ingredientsHalfTwo': [],
'recipeReferrer': 0
}
return self.__post('/Basket/AddPizza', json=params)
|
[
"Add",
"a",
"pizza",
"to",
"the",
"current",
"basket",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L147-L171
|
[
"def",
"add_pizza_to_basket",
"(",
"self",
",",
"item",
",",
"variant",
"=",
"VARIANT",
".",
"MEDIUM",
",",
"quantity",
"=",
"1",
")",
":",
"item_variant",
"=",
"item",
"[",
"variant",
"]",
"ingredients",
"=",
"item_variant",
"[",
"'ingredients'",
"]",
".",
"update",
"(",
"[",
"36",
",",
"42",
"]",
")",
"params",
"=",
"{",
"'stepId'",
":",
"0",
",",
"'quantity'",
":",
"quantity",
",",
"'sizeId'",
":",
"variant",
",",
"'productId'",
":",
"item",
".",
"item_id",
",",
"'ingredients'",
":",
"ingredients",
",",
"'productIdHalfTwo'",
":",
"0",
",",
"'ingredientsHalfTwo'",
":",
"[",
"]",
",",
"'recipeReferrer'",
":",
"0",
"}",
"return",
"self",
".",
"__post",
"(",
"'/Basket/AddPizza'",
",",
"json",
"=",
"params",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.add_side_to_basket
|
Add a side to the current basket.
:param Item item: Item from menu.
:param int quantity: The quantity of side to be added.
:return: A response having added a side to the current basket.
:rtype: requests.Response
|
dominos/api.py
|
def add_side_to_basket(self, item, quantity=1):
'''
Add a side to the current basket.
:param Item item: Item from menu.
:param int quantity: The quantity of side to be added.
:return: A response having added a side to the current basket.
:rtype: requests.Response
'''
item_variant = item[VARIANT.PERSONAL]
params = {
'productSkuId': item_variant['productSkuId'],
'quantity': quantity,
'ComplimentaryItems': []
}
return self.__post('/Basket/AddProduct', json=params)
|
def add_side_to_basket(self, item, quantity=1):
'''
Add a side to the current basket.
:param Item item: Item from menu.
:param int quantity: The quantity of side to be added.
:return: A response having added a side to the current basket.
:rtype: requests.Response
'''
item_variant = item[VARIANT.PERSONAL]
params = {
'productSkuId': item_variant['productSkuId'],
'quantity': quantity,
'ComplimentaryItems': []
}
return self.__post('/Basket/AddProduct', json=params)
|
[
"Add",
"a",
"side",
"to",
"the",
"current",
"basket",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L173-L190
|
[
"def",
"add_side_to_basket",
"(",
"self",
",",
"item",
",",
"quantity",
"=",
"1",
")",
":",
"item_variant",
"=",
"item",
"[",
"VARIANT",
".",
"PERSONAL",
"]",
"params",
"=",
"{",
"'productSkuId'",
":",
"item_variant",
"[",
"'productSkuId'",
"]",
",",
"'quantity'",
":",
"quantity",
",",
"'ComplimentaryItems'",
":",
"[",
"]",
"}",
"return",
"self",
".",
"__post",
"(",
"'/Basket/AddProduct'",
",",
"json",
"=",
"params",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.remove_item_from_basket
|
Remove an item from the current basket.
:param int idx: Basket item id.
:return: A response having removed an item from the current basket.
:rtype: requests.Response
|
dominos/api.py
|
def remove_item_from_basket(self, idx):
'''
Remove an item from the current basket.
:param int idx: Basket item id.
:return: A response having removed an item from the current basket.
:rtype: requests.Response
'''
params = {
'basketItemId': idx,
'wizardItemDelete': False
}
return self.__post('/Basket/RemoveBasketItem', json=params)
|
def remove_item_from_basket(self, idx):
'''
Remove an item from the current basket.
:param int idx: Basket item id.
:return: A response having removed an item from the current basket.
:rtype: requests.Response
'''
params = {
'basketItemId': idx,
'wizardItemDelete': False
}
return self.__post('/Basket/RemoveBasketItem', json=params)
|
[
"Remove",
"an",
"item",
"from",
"the",
"current",
"basket",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L192-L205
|
[
"def",
"remove_item_from_basket",
"(",
"self",
",",
"idx",
")",
":",
"params",
"=",
"{",
"'basketItemId'",
":",
"idx",
",",
"'wizardItemDelete'",
":",
"False",
"}",
"return",
"self",
".",
"__post",
"(",
"'/Basket/RemoveBasketItem'",
",",
"json",
"=",
"params",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.set_payment_method
|
Select the payment method to be used to make a purchase.
:param int method: Payment method id.
:return: A response having set the payment option.
:rtype: requests.Response
|
dominos/api.py
|
def set_payment_method(self, method=PAYMENT_METHOD.CASH_ON_DELIVERY):
'''
Select the payment method to be used to make a purchase.
:param int method: Payment method id.
:return: A response having set the payment option.
:rtype: requests.Response
'''
params = {'paymentMethod': method}
return self.__post('/PaymentOptions/SetPaymentMethod', json=params)
|
def set_payment_method(self, method=PAYMENT_METHOD.CASH_ON_DELIVERY):
'''
Select the payment method to be used to make a purchase.
:param int method: Payment method id.
:return: A response having set the payment option.
:rtype: requests.Response
'''
params = {'paymentMethod': method}
return self.__post('/PaymentOptions/SetPaymentMethod', json=params)
|
[
"Select",
"the",
"payment",
"method",
"going",
"to",
"be",
"used",
"to",
"make",
"a",
"purchase",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L207-L216
|
[
"def",
"set_payment_method",
"(",
"self",
",",
"method",
"=",
"PAYMENT_METHOD",
".",
"CASH_ON_DELIVERY",
")",
":",
"params",
"=",
"{",
"'paymentMethod'",
":",
"method",
"}",
"return",
"self",
".",
"__post",
"(",
"'/PaymentOptions/SetPaymentMethod'",
",",
"json",
"=",
"params",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.process_payment
|
Proceed with payment using the payment method selected earlier.
:return: A response having processed the payment.
:rtype: requests.Response
|
dominos/api.py
|
def process_payment(self):
'''
Proceed with payment using the payment method selected earlier.
:return: A response having processed the payment.
:rtype: requests.Response
'''
params = {
'__RequestVerificationToken': self.session.cookies,
'method': 'submit'
}
return self.__post('/PaymentOptions/Proceed', json=params)
|
def process_payment(self):
'''
Proceed with payment using the payment method selected earlier.
:return: A response having processed the payment.
:rtype: requests.Response
'''
params = {
'__RequestVerificationToken': self.session.cookies,
'method': 'submit'
}
return self.__post('/PaymentOptions/Proceed', json=params)
|
[
"Proceed",
"with",
"payment",
"using",
"the",
"payment",
"method",
"selected",
"earlier",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L224-L236
|
[
"def",
"process_payment",
"(",
"self",
")",
":",
"params",
"=",
"{",
"'__RequestVerificationToken'",
":",
"self",
".",
"session",
".",
"cookies",
",",
"'method'",
":",
"'submit'",
"}",
"return",
"self",
".",
"__post",
"(",
"'/PaymentOptions/Proceed'",
",",
"json",
"=",
"params",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.__get
|
Make an HTTP GET request to the Dominos UK API with the given parameters
for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
|
dominos/api.py
|
def __get(self, path, **kargs):
'''
Make an HTTP GET request to the Dominos UK API with the given parameters
for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
return self.__call_api(self.session.get, path, **kargs)
|
def __get(self, path, **kargs):
'''
Make an HTTP GET request to the Dominos UK API with the given parameters
for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
return self.__call_api(self.session.get, path, **kargs)
|
[
"Make",
"a",
"HTTP",
"GET",
"request",
"to",
"the",
"Dominos",
"UK",
"API",
"with",
"the",
"given",
"parameters",
"for",
"the",
"current",
"session",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L238-L248
|
[
"def",
"__get",
"(",
"self",
",",
"path",
",",
"*",
"*",
"kargs",
")",
":",
"return",
"self",
".",
"__call_api",
"(",
"self",
".",
"session",
".",
"get",
",",
"path",
",",
"*",
"*",
"kargs",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.__post
|
Make an HTTP POST request to the Dominos UK API with the given
parameters for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
|
dominos/api.py
|
def __post(self, path, **kargs):
'''
Make an HTTP POST request to the Dominos UK API with the given
parameters for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
return self.__call_api(self.session.post, path, **kargs)
|
def __post(self, path, **kargs):
'''
Make an HTTP POST request to the Dominos UK API with the given
parameters for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
return self.__call_api(self.session.post, path, **kargs)
|
[
"Make",
"a",
"HTTP",
"POST",
"request",
"to",
"the",
"Dominos",
"UK",
"API",
"with",
"the",
"given",
"parameters",
"for",
"the",
"current",
"session",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L250-L260
|
[
"def",
"__post",
"(",
"self",
",",
"path",
",",
"*",
"*",
"kargs",
")",
":",
"return",
"self",
".",
"__call_api",
"(",
"self",
".",
"session",
".",
"post",
",",
"path",
",",
"*",
"*",
"kargs",
")"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
Client.__call_api
|
Make an HTTP request to the Dominos UK API with the given parameters for
the current session.
:param verb func: HTTP method on the session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
|
dominos/api.py
|
def __call_api(self, verb, path, **kargs):
'''
Make an HTTP request to the Dominos UK API with the given parameters for
the current session.
:param verb func: HTTP method on the session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
response = verb(self.__url(path), **kargs)
if response.status_code != 200:
raise ApiError('{}: {}'.format(response.status_code, response))
return response
|
def __call_api(self, verb, path, **kargs):
'''
Make an HTTP request to the Dominos UK API with the given parameters for
the current session.
:param verb func: HTTP method on the session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
response = verb(self.__url(path), **kargs)
if response.status_code != 200:
raise ApiError('{}: {}'.format(response.status_code, response))
return response
|
[
"Make",
"a",
"HTTP",
"request",
"to",
"the",
"Dominos",
"UK",
"API",
"with",
"the",
"given",
"parameters",
"for",
"the",
"current",
"session",
"."
] |
tomasbasham/dominos
|
python
|
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L264-L280
|
[
"def",
"__call_api",
"(",
"self",
",",
"verb",
",",
"path",
",",
"*",
"*",
"kargs",
")",
":",
"response",
"=",
"verb",
"(",
"self",
".",
"__url",
"(",
"path",
")",
",",
"*",
"*",
"kargs",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"ApiError",
"(",
"'{}: {}'",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
")",
")",
"return",
"response"
] |
59729a8bdca0ae30a84115a0e93e9b1f259faf0e
|
test
|
CursesMenu.append_item
|
Add an item to the end of the menu before the exit item
:param MenuItem item: The item to be added
|
cursesmenu/curses_menu.py
|
def append_item(self, item):
"""
Add an item to the end of the menu before the exit item
:param MenuItem item: The item to be added
"""
did_remove = self.remove_exit()
item.menu = self
self.items.append(item)
if did_remove:
self.add_exit()
if self.screen:
max_row, max_cols = self.screen.getmaxyx()
if max_row < 6 + len(self.items):
self.screen.resize(6 + len(self.items), max_cols)
self.draw()
|
def append_item(self, item):
"""
Add an item to the end of the menu before the exit item
:param MenuItem item: The item to be added
"""
did_remove = self.remove_exit()
item.menu = self
self.items.append(item)
if did_remove:
self.add_exit()
if self.screen:
max_row, max_cols = self.screen.getmaxyx()
if max_row < 6 + len(self.items):
self.screen.resize(6 + len(self.items), max_cols)
self.draw()
|
[
"Add",
"an",
"item",
"to",
"the",
"end",
"of",
"the",
"menu",
"before",
"the",
"exit",
"item"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/curses_menu.py#L88-L103
|
[
"def",
"append_item",
"(",
"self",
",",
"item",
")",
":",
"did_remove",
"=",
"self",
".",
"remove_exit",
"(",
")",
"item",
".",
"menu",
"=",
"self",
"self",
".",
"items",
".",
"append",
"(",
"item",
")",
"if",
"did_remove",
":",
"self",
".",
"add_exit",
"(",
")",
"if",
"self",
".",
"screen",
":",
"max_row",
",",
"max_cols",
"=",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"if",
"max_row",
"<",
"6",
"+",
"len",
"(",
"self",
".",
"items",
")",
":",
"self",
".",
"screen",
".",
"resize",
"(",
"6",
"+",
"len",
"(",
"self",
".",
"items",
")",
",",
"max_cols",
")",
"self",
".",
"draw",
"(",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
CursesMenu.add_exit
|
Add the exit item if necessary. Used to make sure there aren't multiple exit items
:return: True if item needed to be added, False otherwise
:rtype: bool
|
cursesmenu/curses_menu.py
|
def add_exit(self):
"""
Add the exit item if necessary. Used to make sure there aren't multiple exit items
:return: True if item needed to be added, False otherwise
:rtype: bool
"""
if self.items:
if self.items[-1] is not self.exit_item:
self.items.append(self.exit_item)
return True
return False
|
def add_exit(self):
"""
Add the exit item if necessary. Used to make sure there aren't multiple exit items
:return: True if item needed to be added, False otherwise
:rtype: bool
"""
if self.items:
if self.items[-1] is not self.exit_item:
self.items.append(self.exit_item)
return True
return False
|
[
"Add",
"the",
"exit",
"item",
"if",
"necessary",
".",
"Used",
"to",
"make",
"sure",
"there",
"aren",
"t",
"multiple",
"exit",
"items"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/curses_menu.py#L105-L116
|
[
"def",
"add_exit",
"(",
"self",
")",
":",
"if",
"self",
".",
"items",
":",
"if",
"self",
".",
"items",
"[",
"-",
"1",
"]",
"is",
"not",
"self",
".",
"exit_item",
":",
"self",
".",
"items",
".",
"append",
"(",
"self",
".",
"exit_item",
")",
"return",
"True",
"return",
"False"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
CursesMenu.draw
|
Redraws the menu and refreshes the screen. Should be called whenever something changes that needs to be redrawn.
|
cursesmenu/curses_menu.py
|
def draw(self):
"""
Redraws the menu and refreshes the screen. Should be called whenever something changes that needs to be redrawn.
"""
self.screen.border(0)
if self.title is not None:
self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)
if self.subtitle is not None:
self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)
for index, item in enumerate(self.items):
if self.current_option == index:
text_style = self.highlight
else:
text_style = self.normal
self.screen.addstr(5 + index, 4, item.show(index), text_style)
screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()
top_row = 0
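# If the menu is taller than the screen, scroll the pad so the highlighted option stays visible.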
if 6 + len(self.items) > screen_rows:
if screen_rows + self.current_option < 6 + len(self.items):
top_row = self.current_option
else:
top_row = 6 + len(self.items) - screen_rows
self.screen.refresh(top_row, 0, 0, 0, screen_rows - 1, screen_cols - 1)
|
def draw(self):
"""
Redraws the menu and refreshes the screen. Should be called whenever something changes that needs to be redrawn.
"""
self.screen.border(0)
if self.title is not None:
self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)
if self.subtitle is not None:
self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)
for index, item in enumerate(self.items):
if self.current_option == index:
text_style = self.highlight
else:
text_style = self.normal
self.screen.addstr(5 + index, 4, item.show(index), text_style)
screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()
top_row = 0
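# If the menu is taller than the screen, scroll the pad so the highlighted option stays visible.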
if 6 + len(self.items) > screen_rows:
if screen_rows + self.current_option < 6 + len(self.items):
top_row = self.current_option
else:
top_row = 6 + len(self.items) - screen_rows
self.screen.refresh(top_row, 0, 0, 0, screen_rows - 1, screen_cols - 1)
|
[
"Redraws",
"the",
"menu",
"and",
"refreshes",
"the",
"screen",
".",
"Should",
"be",
"called",
"whenever",
"something",
"changes",
"that",
"needs",
"to",
"be",
"redrawn",
"."
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/curses_menu.py#L195-L221
|
[
"def",
"draw",
"(",
"self",
")",
":",
"self",
".",
"screen",
".",
"border",
"(",
"0",
")",
"if",
"self",
".",
"title",
"is",
"not",
"None",
":",
"self",
".",
"screen",
".",
"addstr",
"(",
"2",
",",
"2",
",",
"self",
".",
"title",
",",
"curses",
".",
"A_STANDOUT",
")",
"if",
"self",
".",
"subtitle",
"is",
"not",
"None",
":",
"self",
".",
"screen",
".",
"addstr",
"(",
"4",
",",
"2",
",",
"self",
".",
"subtitle",
",",
"curses",
".",
"A_BOLD",
")",
"for",
"index",
",",
"item",
"in",
"enumerate",
"(",
"self",
".",
"items",
")",
":",
"if",
"self",
".",
"current_option",
"==",
"index",
":",
"text_style",
"=",
"self",
".",
"highlight",
"else",
":",
"text_style",
"=",
"self",
".",
"normal",
"self",
".",
"screen",
".",
"addstr",
"(",
"5",
"+",
"index",
",",
"4",
",",
"item",
".",
"show",
"(",
"index",
")",
",",
"text_style",
")",
"screen_rows",
",",
"screen_cols",
"=",
"CursesMenu",
".",
"stdscr",
".",
"getmaxyx",
"(",
")",
"top_row",
"=",
"0",
"if",
"6",
"+",
"len",
"(",
"self",
".",
"items",
")",
">",
"screen_rows",
":",
"if",
"screen_rows",
"+",
"self",
".",
"current_option",
"<",
"6",
"+",
"len",
"(",
"self",
".",
"items",
")",
":",
"top_row",
"=",
"self",
".",
"current_option",
"else",
":",
"top_row",
"=",
"6",
"+",
"len",
"(",
"self",
".",
"items",
")",
"-",
"screen_rows",
"self",
".",
"screen",
".",
"refresh",
"(",
"top_row",
",",
"0",
",",
"0",
",",
"0",
",",
"screen_rows",
"-",
"1",
",",
"screen_cols",
"-",
"1",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
CursesMenu.process_user_input
|
Gets the next single character and decides what to do with it
|
cursesmenu/curses_menu.py
|
def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input()
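# Digit shortcuts: the highest accepted key is '9' when there are 9 or more items, otherwise the digit of the last item.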
go_to_max = ord("9") if len(self.items) >= 9 else ord(str(len(self.items)))
if ord('1') <= user_input <= go_to_max:
self.go_to(user_input - ord('0') - 1)
elif user_input == curses.KEY_DOWN:
self.go_down()
elif user_input == curses.KEY_UP:
self.go_up()
elif user_input == ord("\n"):
self.select()
return user_input
|
def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input()
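# Digit shortcuts: the highest accepted key is '9' when there are 9 or more items, otherwise the digit of the last item.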
go_to_max = ord("9") if len(self.items) >= 9 else ord(str(len(self.items)))
if ord('1') <= user_input <= go_to_max:
self.go_to(user_input - ord('0') - 1)
elif user_input == curses.KEY_DOWN:
self.go_down()
elif user_input == curses.KEY_UP:
self.go_up()
elif user_input == ord("\n"):
self.select()
return user_input
|
[
"Gets",
"the",
"next",
"single",
"character",
"and",
"decides",
"what",
"to",
"do",
"with",
"it"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/curses_menu.py#L274-L291
|
[
"def",
"process_user_input",
"(",
"self",
")",
":",
"user_input",
"=",
"self",
".",
"get_input",
"(",
")",
"go_to_max",
"=",
"ord",
"(",
"\"9\"",
")",
"if",
"len",
"(",
"self",
".",
"items",
")",
">=",
"9",
"else",
"ord",
"(",
"str",
"(",
"len",
"(",
"self",
".",
"items",
")",
")",
")",
"if",
"ord",
"(",
"'1'",
")",
"<=",
"user_input",
"<=",
"go_to_max",
":",
"self",
".",
"go_to",
"(",
"user_input",
"-",
"ord",
"(",
"'0'",
")",
"-",
"1",
")",
"elif",
"user_input",
"==",
"curses",
".",
"KEY_DOWN",
":",
"self",
".",
"go_down",
"(",
")",
"elif",
"user_input",
"==",
"curses",
".",
"KEY_UP",
":",
"self",
".",
"go_up",
"(",
")",
"elif",
"user_input",
"==",
"ord",
"(",
"\"\\n\"",
")",
":",
"self",
".",
"select",
"(",
")",
"return",
"user_input"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
CursesMenu.select
|
Select the current item and run it
|
cursesmenu/curses_menu.py
|
def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self.should_exit = self.selected_item.should_exit
if not self.should_exit:
self.draw()
|
def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self.should_exit = self.selected_item.should_exit
if not self.should_exit:
self.draw()
|
[
"Select",
"the",
"current",
"item",
"and",
"run",
"it"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/curses_menu.py#L323-L335
|
[
"def",
"select",
"(",
"self",
")",
":",
"self",
".",
"selected_option",
"=",
"self",
".",
"current_option",
"self",
".",
"selected_item",
".",
"set_up",
"(",
")",
"self",
".",
"selected_item",
".",
"action",
"(",
")",
"self",
".",
"selected_item",
".",
"clean_up",
"(",
")",
"self",
".",
"returned_value",
"=",
"self",
".",
"selected_item",
".",
"get_return",
"(",
")",
"self",
".",
"should_exit",
"=",
"self",
".",
"selected_item",
".",
"should_exit",
"if",
"not",
"self",
".",
"should_exit",
":",
"self",
".",
"draw",
"(",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
ExitItem.show
|
This class overrides this method
|
cursesmenu/curses_menu.py
|
def show(self, index):
"""
This class overrides this method
"""
if self.menu and self.menu.parent:
self.text = "Return to %s menu" % self.menu.parent.title
else:
self.text = "Exit"
return super(ExitItem, self).show(index)
|
def show(self, index):
"""
This class overrides this method
"""
if self.menu and self.menu.parent:
self.text = "Return to %s menu" % self.menu.parent.title
else:
self.text = "Exit"
return super(ExitItem, self).show(index)
|
[
"This",
"class",
"overrides",
"this",
"method"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/curses_menu.py#L424-L432
|
[
"def",
"show",
"(",
"self",
",",
"index",
")",
":",
"if",
"self",
".",
"menu",
"and",
"self",
".",
"menu",
".",
"parent",
":",
"self",
".",
"text",
"=",
"\"Return to %s menu\"",
"%",
"self",
".",
"menu",
".",
"parent",
".",
"title",
"else",
":",
"self",
".",
"text",
"=",
"\"Exit\"",
"return",
"super",
"(",
"ExitItem",
",",
"self",
")",
".",
"show",
"(",
"index",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
FunctionItem.action
|
This class overrides this method
|
cursesmenu/items/function_item.py
|
def action(self):
"""
This class overrides this method
"""
self.return_value = self.function(*self.args, **self.kwargs)
|
def action(self):
"""
This class overrides this method
"""
self.return_value = self.function(*self.args, **self.kwargs)
|
[
"This",
"class",
"overrides",
"this",
"method"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/items/function_item.py#L31-L35
|
[
"def",
"action",
"(",
"self",
")",
":",
"self",
".",
"return_value",
"=",
"self",
".",
"function",
"(",
"*",
"self",
".",
"args",
",",
"*",
"*",
"self",
".",
"kwargs",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
CommandItem.action
|
This class overrides this method
|
cursesmenu/items/command_item.py
|
def action(self):
"""
This class overrides this method
"""
commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
try:
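# subprocess.run() exists only on Python 3.5+; on older versions the AttributeError path falls back to subprocess.call.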
completed_process = subprocess.run(commandline, shell=True)
self.exit_status = completed_process.returncode
except AttributeError:
self.exit_status = subprocess.call(commandline, shell=True)
|
def action(self):
"""
This class overrides this method
"""
commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
try:
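# subprocess.run() exists only on Python 3.5+; on older versions the AttributeError path falls back to subprocess.call.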
completed_process = subprocess.run(commandline, shell=True)
self.exit_status = completed_process.returncode
except AttributeError:
self.exit_status = subprocess.call(commandline, shell=True)
|
[
"This",
"class",
"overrides",
"this",
"method"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/items/command_item.py#L27-L36
|
[
"def",
"action",
"(",
"self",
")",
":",
"commandline",
"=",
"\"{0} {1}\"",
".",
"format",
"(",
"self",
".",
"command",
",",
"\" \"",
".",
"join",
"(",
"self",
".",
"arguments",
")",
")",
"try",
":",
"completed_process",
"=",
"subprocess",
".",
"run",
"(",
"commandline",
",",
"shell",
"=",
"True",
")",
"self",
".",
"exit_status",
"=",
"completed_process",
".",
"returncode",
"except",
"AttributeError",
":",
"self",
".",
"exit_status",
"=",
"subprocess",
".",
"call",
"(",
"commandline",
",",
"shell",
"=",
"True",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
parse_old_menu
|
Take an old-style menuData dictionary and return a CursesMenu
:param dict menu_data:
:return: A new CursesMenu
:rtype: CursesMenu
|
cursesmenu/old_curses_menu.py
|
def parse_old_menu(menu_data):
"""
Take an old-style menuData dictionary and return a CursesMenu
:param dict menu_data:
:return: A new CursesMenu
:rtype: CursesMenu
"""
menu_title = menu_data['title']
menu = CursesMenu(menu_title)
for item in menu_data["options"]:
item_type = item["type"]
item_title = item["title"]
if item_type == menuItem.COMMAND:
item_command = item["command"]
menu.append_item(CommandItem(item_title, item_command, menu))
elif item_type == menuItem.FUNCTION:
item_function = item["function"]
menu.append_item(FunctionItem(item_title, item_function, menu))
elif item_type == menuItem.EXITMENU:
menu.append_item(ExitItem(item_title, menu))
elif item_type == menuItem.NUMBER:
menu.append_item(SelectionItem(item_title, menu))
elif item_type == menuItem.MENU:
new_menu = parse_old_menu(item)
menu.append_item(SubmenuItem(item_title, menu, new_menu))
return menu
|
def parse_old_menu(menu_data):
"""
Take an old-style menuData dictionary and return a CursesMenu
:param dict menu_data:
:return: A new CursesMenu
:rtype: CursesMenu
"""
menu_title = menu_data['title']
menu = CursesMenu(menu_title)
for item in menu_data["options"]:
item_type = item["type"]
item_title = item["title"]
if item_type == menuItem.COMMAND:
item_command = item["command"]
menu.append_item(CommandItem(item_title, item_command, menu))
elif item_type == menuItem.FUNCTION:
item_function = item["function"]
menu.append_item(FunctionItem(item_title, item_function, menu))
elif item_type == menuItem.EXITMENU:
menu.append_item(ExitItem(item_title, menu))
elif item_type == menuItem.NUMBER:
menu.append_item(SelectionItem(item_title, menu))
elif item_type == menuItem.MENU:
new_menu = parse_old_menu(item)
menu.append_item(SubmenuItem(item_title, menu, new_menu))
return menu
|
[
"Take",
"an",
"old",
"-",
"style",
"menuData",
"dictionary",
"and",
"return",
"a",
"CursesMenu"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/old_curses_menu.py#L20-L47
|
[
"def",
"parse_old_menu",
"(",
"menu_data",
")",
":",
"menu_title",
"=",
"menu_data",
"[",
"'title'",
"]",
"menu",
"=",
"CursesMenu",
"(",
"menu_title",
")",
"for",
"item",
"in",
"menu_data",
"[",
"\"options\"",
"]",
":",
"item_type",
"=",
"item",
"[",
"\"type\"",
"]",
"item_title",
"=",
"item",
"[",
"\"title\"",
"]",
"if",
"item_type",
"==",
"menuItem",
".",
"COMMAND",
":",
"item_command",
"=",
"item",
"[",
"\"command\"",
"]",
"menu",
".",
"append_item",
"(",
"CommandItem",
"(",
"item_title",
",",
"item_command",
",",
"menu",
")",
")",
"elif",
"item_type",
"==",
"menuItem",
".",
"FUNCTION",
":",
"item_function",
"=",
"item",
"[",
"\"function\"",
"]",
"menu",
".",
"append_item",
"(",
"FunctionItem",
"(",
"item_title",
",",
"item_function",
",",
"menu",
")",
")",
"elif",
"item_type",
"==",
"menuItem",
".",
"EXITMENU",
":",
"menu",
".",
"append_item",
"(",
"ExitItem",
"(",
"item_title",
",",
"menu",
")",
")",
"elif",
"item_type",
"==",
"menuItem",
".",
"NUMBER",
":",
"menu",
".",
"append_item",
"(",
"SelectionItem",
"(",
"item_title",
",",
"menu",
")",
")",
"elif",
"item_type",
"==",
"menuItem",
".",
"MENU",
":",
"new_menu",
"=",
"parse_old_menu",
"(",
"item",
")",
"menu",
".",
"append_item",
"(",
"SubmenuItem",
"(",
"item_title",
",",
"menu",
",",
"new_menu",
")",
")",
"return",
"menu"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
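A hypothetical sketch of the old-style menuData dict this parser expects (the `menuItem.*` constants come from the upstream module and are assumed in scope, as is `parse_old_menu`; values are illustrative):

```python
# Hypothetical input shape; keys match what parse_old_menu reads above.
menu_data = {
    'title': 'Main menu',
    'options': [
        {'type': menuItem.COMMAND,  'title': 'List files', 'command': 'ls'},
        {'type': menuItem.FUNCTION, 'title': 'Say hello',  'function': lambda: print('hello')},
        {'type': menuItem.EXITMENU, 'title': 'Exit'},
    ],
}
menu = parse_old_menu(menu_data)  # -> CursesMenu with three items
```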
test
|
SubmenuItem.set_up
|
This class overrides this method
|
cursesmenu/items/submenu_item.py
|
def set_up(self):
"""
This class overrides this method
"""
self.menu.pause()
curses.def_prog_mode()
self.menu.clear_screen()
|
def set_up(self):
"""
This class overrides this method
"""
self.menu.pause()
curses.def_prog_mode()
self.menu.clear_screen()
|
[
"This",
"class",
"overrides",
"this",
"method"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/items/submenu_item.py#L31-L37
|
[
"def",
"set_up",
"(",
"self",
")",
":",
"self",
".",
"menu",
".",
"pause",
"(",
")",
"curses",
".",
"def_prog_mode",
"(",
")",
"self",
".",
"menu",
".",
"clear_screen",
"(",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
SubmenuItem.clean_up
|
This class overrides this method
|
cursesmenu/items/submenu_item.py
|
def clean_up(self):
"""
This class overrides this method
"""
self.submenu.join()
self.menu.clear_screen()
curses.reset_prog_mode()
curses.curs_set(1) # reset doesn't do this right
curses.curs_set(0)
self.menu.resume()
|
def clean_up(self):
"""
This class overrides this method
"""
self.submenu.join()
self.menu.clear_screen()
curses.reset_prog_mode()
curses.curs_set(1) # reset doesn't do this right
curses.curs_set(0)
self.menu.resume()
|
[
"This",
"class",
"overrides",
"this",
"method"
] |
pmbarrett314/curses-menu
|
python
|
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/items/submenu_item.py#L45-L54
|
[
"def",
"clean_up",
"(",
"self",
")",
":",
"self",
".",
"submenu",
".",
"join",
"(",
")",
"self",
".",
"menu",
".",
"clear_screen",
"(",
")",
"curses",
".",
"reset_prog_mode",
"(",
")",
"curses",
".",
"curs_set",
"(",
"1",
")",
"# reset doesn't do this right",
"curses",
".",
"curs_set",
"(",
"0",
")",
"self",
".",
"menu",
".",
"resume",
"(",
")"
] |
c76fc00ab9d518eab275e55434fc2941f49c6b30
|
test
|
add_aggregation_columns
|
Add new columns containing aggregation values computed on existing columns
---
### Parameters
*mandatory :*
- `group_cols` (*str* or *list*): columns used to aggregate the data
- `aggregations` (*dict*): keys are names of the new columns and values are aggregation functions
Examples of aggregation functions: 'sum', 'max'
Available aggregation functions are listed [here](
https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation)
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 60 | 6 |
```cson
add_aggregation_columns:
group_cols: ['ENTITY', 'YEAR']
aggregations:
sum_value1:
VALUE_1: 'sum' # sum of `VALUE_1` put in `sum_value1` column
max_value1:
VALUE_1: 'max' # max of `VALUE_1` put in `max_value1` column
mean_value2:
VALUE_2: 'mean' # mean of `VALUE_2` put in `mean_value2` column
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 | sum_value1 | max_value1 | mean_value2 |
|:------:|:----:|:-------:|:-------:|:----------:|:----------:|:-----------:|
| A | 2017 | 10 | 3 | 30 | 20 | 2.0 |
| A | 2017 | 20 | 1 | 30 | 20 | 2.0 |
| A | 2018 | 10 | 5 | 40 | 30 | 4.5 |
| A | 2018 | 30 | 4 | 40 | 30 | 4.5 |
| B | 2017 | 60 | 4 | 100 | 60 | 3.5 |
| B | 2017 | 40 | 3 | 100 | 60 | 3.5 |
| B | 2018 | 50 | 7 | 110 | 60 | 6.5 |
| B | 2018 | 60 | 6 | 110 | 60 | 6.5 |
|
toucan_data_sdk/utils/postprocess/add_aggregation_columns.py
|
def add_aggregation_columns(
df, *,
group_cols: Union[str, List[str]],
aggregations: Dict[str, Agg]
):
"""
Add new columns containing aggregation values computed on existing columns
---
### Parameters
*mandatory :*
- `group_cols` (*str* or *list*): columns used to aggregate the data
- `aggregations` (*dict*): keys are names of the new columns and values are aggregation functions
Examples of aggregation functions: 'sum', 'max'
Available aggregation functions are listed [here](
https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation)
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 60 | 6 |
```cson
add_aggregation_columns:
group_cols: ['ENTITY', 'YEAR']
aggregations:
sum_value1:
VALUE_1: 'sum' # sum of `VALUE_1` put in `sum_value1` column
max_value1:
VALUE_1: 'max' # max of `VALUE_1` put in `max_value1` column
mean_value2:
VALUE_2: 'mean' # mean of `VALUE_2` put in `mean_value2` column
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 | sum_value1 | max_value1 | mean_value2 |
|:------:|:----:|:-------:|:-------:|:----------:|:----------:|:-----------:|
| A | 2017 | 10 | 3 | 30 | 20 | 2.0 |
| A | 2017 | 20 | 1 | 30 | 20 | 2.0 |
| A | 2018 | 10 | 5 | 40 | 30 | 4.5 |
| A | 2018 | 30 | 4 | 40 | 30 | 4.5 |
| B | 2017 | 60 | 4 | 100 | 60 | 3.5 |
| B | 2017 | 40 | 3 | 100 | 60 | 3.5 |
| B | 2018 | 50 | 7 | 110 | 60 | 6.5 |
| B | 2018 | 60 | 6 | 110 | 60 | 6.5 |
"""
group = df.groupby(group_cols)
for new_col, aggs in aggregations.items():
assert len(aggs) == 1
(col, agg), *_ = aggs.items()
df[new_col] = group[col].transform(agg)
return df
|
def add_aggregation_columns(
df, *,
group_cols: Union[str, List[str]],
aggregations: Dict[str, Agg]
):
"""
Add new columns containing aggregation values computed on existing columns
---
### Parameters
*mandatory :*
- `group_cols` (*str* or *list*): columns used to aggregate the data
- `aggregations` (*dict*): keys are names of the new columns and values are aggregation functions
Examples of aggregation functions: 'sum', 'max'
Available aggregation functions are listed [here](
https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation)
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 60 | 6 |
```cson
add_aggregation_columns:
group_cols: ['ENTITY', 'YEAR']
aggregations:
sum_value1:
VALUE_1: 'sum' # sum of `VALUE_1` put in `sum_value1` column
max_value1:
VALUE_1: 'max' # max of `VALUE_1` put in `max_value1` column
mean_value2:
VALUE_2: 'mean' # mean of `VALUE_2` put in `mean_value2` column
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 | sum_value1 | max_value1 | mean_value2 |
|:------:|:----:|:-------:|:-------:|:----------:|:----------:|:-----------:|
| A | 2017 | 10 | 3 | 30 | 20 | 2.0 |
| A | 2017 | 20 | 1 | 30 | 20 | 2.0 |
| A | 2018 | 10 | 5 | 40 | 30 | 4.5 |
| A | 2018 | 30 | 4 | 40 | 30 | 4.5 |
| B | 2017 | 60 | 4 | 100 | 60 | 3.5 |
| B | 2017 | 40 | 3 | 100 | 60 | 3.5 |
| B | 2018 | 50 | 7 | 110 | 60 | 6.5 |
| B | 2018 | 60 | 6 | 110 | 60 | 6.5 |
"""
group = df.groupby(group_cols)
for new_col, aggs in aggregations.items():
assert len(aggs) == 1
(col, agg), *_ = aggs.items()
df[new_col] = group[col].transform(agg)
return df
|
[
"Add",
"new",
"columns",
"containing",
"aggregations",
"values",
"on",
"existing",
"columns"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/add_aggregation_columns.py#L6-L74
|
[
"def",
"add_aggregation_columns",
"(",
"df",
",",
"*",
",",
"group_cols",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
",",
"aggregations",
":",
"Dict",
"[",
"str",
",",
"Agg",
"]",
")",
":",
"group",
"=",
"df",
".",
"groupby",
"(",
"group_cols",
")",
"for",
"new_col",
",",
"aggs",
"in",
"aggregations",
".",
"items",
"(",
")",
":",
"assert",
"len",
"(",
"aggs",
")",
"==",
"1",
"(",
"col",
",",
"agg",
")",
",",
"",
"*",
"_",
"=",
"aggs",
".",
"items",
"(",
")",
"df",
"[",
"new_col",
"]",
"=",
"group",
"[",
"col",
"]",
".",
"transform",
"(",
"agg",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
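A minimal usage sketch reproducing the docstring's example, assuming `add_aggregation_columns` as defined above is in scope:

```python
import pandas as pd

df = pd.DataFrame({
    'ENTITY':  ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'B'],
    'YEAR':    [2017, 2017, 2018, 2018, 2017, 2017, 2018, 2018],
    'VALUE_1': [10, 20, 10, 30, 60, 40, 50, 60],
    'VALUE_2': [3, 1, 5, 4, 4, 3, 7, 6],
})

out = add_aggregation_columns(
    df,
    group_cols=['ENTITY', 'YEAR'],
    aggregations={
        'sum_value1':  {'VALUE_1': 'sum'},   # per-group sum, broadcast to every row
        'max_value1':  {'VALUE_1': 'max'},
        'mean_value2': {'VALUE_2': 'mean'},
    },
)
print(out)  # matches the Output table in the docstring above
```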
test
|
top
|
Get the top or flop N results based on a column value for each specified group column
---
### Parameters
*mandatory :*
- `value` (*str*): column name on which you will rank the results
- `limit` (*int*): the number N of results to retrieve.
Use a positive number x to retrieve the first x results.
Use a negative number -x to retrieve the last x results.
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort in ascending or descending order. By default: `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top:
value: 'value'
limit: 4
order: 'asc'
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lala | 1 | 250 |
| toto | 1 | 300 |
| lala | 2 | 350 |
| lala | 2 | 450 |
|
toucan_data_sdk/utils/postprocess/top.py
|
def top(
df,
value: str,
limit: int,
order: str = 'asc',
group: Union[str, List[str]] = None
):
"""
Get the top or flop N results based on a column value for each specified group column
---
### Parameters
*mandatory :*
- `value` (*str*): column name on which you will rank the results
- `limit` (*int*): the number N of results to retrieve.
Use a positive number x to retrieve the first x results.
Use a negative number -x to retrieve the last x results.
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort in ascending or descending order. By default: `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top:
value: 'value'
limit: 4
order: 'asc'
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lala | 1 | 250 |
| toto | 1 | 300 |
| lala | 2 | 350 |
| lala | 2 | 450 |
"""
ascending = order != 'desc'
limit = int(limit)
filter_func = 'nlargest' if (limit > 0) ^ ascending else 'nsmallest'
def _top(df):
return getattr(df, filter_func)(abs(limit), value).sort_values(by=value,
ascending=ascending)
if group is None:
df = _top(df)
else:
df = df.groupby(group).apply(_top)
return df
|
def top(
df,
value: str,
limit: int,
order: str = 'asc',
group: Union[str, List[str]] = None
):
"""
Get the top or flop N results based on a column value for each specified group column
---
### Parameters
*mandatory :*
- `value` (*str*): column name on which you will rank the results
- `limit` (*int*): the number N of results to retrieve.
Use a positive number x to retrieve the first x results.
Use a negative number -x to retrieve the last x results.
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort by ascending or descending order. By default: `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top:
value: 'value'
limit: -4
order: 'asc'
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lala | 1 | 250 |
| toto | 1 | 300 |
| lala | 2 | 350 |
| lala | 2 | 450 |
"""
ascending = order != 'desc'
limit = int(limit)
filter_func = 'nlargest' if (limit > 0) ^ ascending else 'nsmallest'
def _top(df):
return getattr(df, filter_func)(abs(limit), value).sort_values(by=value,
ascending=ascending)
if group is None:
df = _top(df)
else:
df = df.groupby(group).apply(_top)
return df
|
[
"Get",
"the",
"top",
"or",
"flop",
"N",
"results",
"based",
"on",
"a",
"column",
"value",
"for",
"each",
"specified",
"group",
"columns"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/top.py#L4-L77
|
[
"def",
"top",
"(",
"df",
",",
"value",
":",
"str",
",",
"limit",
":",
"int",
",",
"order",
":",
"str",
"=",
"'asc'",
",",
"group",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
":",
"ascending",
"=",
"order",
"!=",
"'desc'",
"limit",
"=",
"int",
"(",
"limit",
")",
"filter_func",
"=",
"'nlargest'",
"if",
"(",
"limit",
">",
"0",
")",
"^",
"ascending",
"else",
"'nsmallest'",
"def",
"_top",
"(",
"df",
")",
":",
"return",
"getattr",
"(",
"df",
",",
"filter_func",
")",
"(",
"abs",
"(",
"limit",
")",
",",
"value",
")",
".",
"sort_values",
"(",
"by",
"=",
"value",
",",
"ascending",
"=",
"ascending",
")",
"if",
"group",
"is",
"None",
":",
"df",
"=",
"_top",
"(",
"df",
")",
"else",
":",
"df",
"=",
"df",
".",
"groupby",
"(",
"group",
")",
".",
"apply",
"(",
"_top",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
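A minimal, hedged usage sketch for the `top` record above; the import path is inferred from the record's `path` field and the data mirrors the docstring example.
```python
# Minimal usage sketch for `top` (import path inferred from the record's
# `path` field: toucan_data_sdk/utils/postprocess/top.py).
import pandas as pd
from toucan_data_sdk.utils.postprocess.top import top

df = pd.DataFrame({
    'variable': ['lili', 'lili', 'toto', 'toto', 'toto',
                 'lala', 'lala', 'lala', 'lala', 'lala'],
    'Category': [1, 1, 1, 1, 1, 1, 1, 1, 2, 2],
    'value': [50, 20, 100, 200, 300, 100, 150, 250, 350, 450],
})

# limit=-4 + order='asc': the 4 largest values, displayed in ascending order
# (matches the docstring's output table).
print(top(df, value='value', limit=-4, order='asc'))

# Per-group variant: the 2 largest values inside each Category, descending.
print(top(df, value='value', limit=2, order='desc', group='Category'))
```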
test
|
top_group
|
Get the top or flop N results based on a function and a column value that aggregates the input.
The result is composed of all the original lines including only lines corresponding
to the top groups
---
### Parameters
*mandatory :*
- `value` (*str*): Name of the column name on which you will rank the results.
- `limit` (*int*): Number to specify the N results you want to retrieve from the sorted values.
- Use a positive number x to retrieve the first x results.
- Use a negative number -x to retrieve the last x results.
- `aggregate_by` (*list of str*): name(s) of columns you want to aggregate
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort by ascending or descending order. By default: `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
- `function` : Function to use to group over the group column
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top_group:
group: ["Category"]
value: 'value'
aggregate_by: ["variable"]
limit: 2
order: "desc"
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
|
toucan_data_sdk/utils/postprocess/top.py
|
def top_group(
df,
aggregate_by: List[str],
value: str,
limit: int,
order: str = 'asc',
function: str = 'sum',
group: Union[str, List[str]] = None
):
"""
Get the top or flop N results based on a function and a column value that aggregates the input.
The result is composed of all the original lines including only lines corresponding
to the top groups
---
### Parameters
*mandatory :*
- `value` (*str*): Name of the column name on which you will rank the results.
- `limit` (*int*): Number to specify the N results you want to retrieve from the sorted values.
- Use a positive number x to retrieve the first x results.
- Use a negative number -x to retrieve the last x results.
- `aggregate_by` (*list of str*): name(s) of columns you want to aggregate
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort by ascending or descending order. By default: `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
- `function` : Function to use to group over the group column
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top_group:
group: ["Category"]
value: 'value'
aggregate_by: ["variable"]
limit: 2
order: "desc"
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
"""
aggregate_by = aggregate_by or []
group_top = group or []
df2 = df.groupby(group_top + aggregate_by).agg(function).reset_index()
df2 = top(df2, group=group, value=value, limit=limit, order=order).reset_index(drop=True)
df2 = df2[group_top + aggregate_by]
df = df2.merge(df, on=group_top + aggregate_by)
return df
|
def top_group(
df,
aggregate_by: List[str],
value: str,
limit: int,
order: str = 'asc',
function: str = 'sum',
group: Union[str, List[str]] = None
):
"""
Get the top or flop N results based on a function and a column value that aggregates the input.
The result is composed of all the original lines including only lines corresponding
to the top groups
---
### Parameters
*mandatory :*
- `value` (*str*): Name of the column name on which you will rank the results.
- `limit` (*int*): Number to specify the N results you want to retrieve from the sorted values.
- Use a positive number x to retrieve the first x results.
- Use a negative number -x to retrieve the last x results.
- `aggregate_by` (*list of str*): name(s) of columns you want to aggregate
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort by ascending or descending order. By default: `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
- `function` : Function to use to group over the group column
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top_group:
group: ["Category"]
value: 'value'
aggregate_by: ["variable"]
limit: 2
order: "desc"
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
"""
aggregate_by = aggregate_by or []
group_top = group or []
df2 = df.groupby(group_top + aggregate_by).agg(function).reset_index()
df2 = top(df2, group=group, value=value, limit=limit, order=order).reset_index(drop=True)
df2 = df2[group_top + aggregate_by]
df = df2.merge(df, on=group_top + aggregate_by)
return df
|
[
"Get",
"the",
"top",
"or",
"flop",
"N",
"results",
"based",
"on",
"a",
"function",
"and",
"a",
"column",
"value",
"that",
"aggregates",
"the",
"input",
".",
"The",
"result",
"is",
"composed",
"of",
"all",
"the",
"original",
"lines",
"including",
"only",
"lines",
"corresponding",
"to",
"the",
"top",
"groups"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/top.py#L80-L158
|
[
"def",
"top_group",
"(",
"df",
",",
"aggregate_by",
":",
"List",
"[",
"str",
"]",
",",
"value",
":",
"str",
",",
"limit",
":",
"int",
",",
"order",
":",
"str",
"=",
"'asc'",
",",
"function",
":",
"str",
"=",
"'sum'",
",",
"group",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
":",
"aggregate_by",
"=",
"aggregate_by",
"or",
"[",
"]",
"group_top",
"=",
"group",
"or",
"[",
"]",
"df2",
"=",
"df",
".",
"groupby",
"(",
"group_top",
"+",
"aggregate_by",
")",
".",
"agg",
"(",
"function",
")",
".",
"reset_index",
"(",
")",
"df2",
"=",
"top",
"(",
"df2",
",",
"group",
"=",
"group",
",",
"value",
"=",
"value",
",",
"limit",
"=",
"limit",
",",
"order",
"=",
"order",
")",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"df2",
"=",
"df2",
"[",
"group_top",
"+",
"aggregate_by",
"]",
"df",
"=",
"df2",
".",
"merge",
"(",
"df",
",",
"on",
"=",
"group_top",
"+",
"aggregate_by",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
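A hedged usage sketch for `top_group`, mirroring the record's cson example; the import path follows the record's `path` field.
```python
# Minimal usage sketch for `top_group` (import path inferred from the
# record's `path` field: toucan_data_sdk/utils/postprocess/top.py).
import pandas as pd
from toucan_data_sdk.utils.postprocess.top import top_group

df = pd.DataFrame({
    'variable': ['lili', 'lili', 'toto', 'toto', 'toto',
                 'lala', 'lala', 'lala', 'lala', 'lala'],
    'Category': [1, 1, 1, 1, 1, 1, 1, 1, 2, 2],
    'value': [50, 20, 100, 200, 300, 100, 150, 250, 350, 450],
})

# Inside each Category, keep every row of the 2 variables whose summed
# value is highest (positive limit + order='desc').
result = top_group(df, aggregate_by=['variable'], value='value',
                   limit=2, order='desc', group=['Category'])
print(result)
```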
test
|
convert_str_to_datetime
|
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
|
toucan_data_sdk/utils/postprocess/converter.py
|
def convert_str_to_datetime(df, *, column: str, format: str):
"""
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
"""
df[column] = pd.to_datetime(df[column], format=format)
return df
|
def convert_str_to_datetime(df, *, column: str, format: str):
"""
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
"""
df[column] = pd.to_datetime(df[column], format=format)
return df
|
[
"Convert",
"string",
"column",
"into",
"datetime",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/converter.py#L4-L18
|
[
"def",
"convert_str_to_datetime",
"(",
"df",
",",
"*",
",",
"column",
":",
"str",
",",
"format",
":",
"str",
")",
":",
"df",
"[",
"column",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"column",
"]",
",",
"format",
"=",
"format",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
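A short usage sketch for `convert_str_to_datetime`; the import path is inferred from the record's `path` field, and note that `column` and `format` are keyword-only.
```python
# Minimal sketch for `convert_str_to_datetime` (import path inferred from
# the record's `path` field: toucan_data_sdk/utils/postprocess/converter.py).
import pandas as pd
from toucan_data_sdk.utils.postprocess.converter import convert_str_to_datetime

df = pd.DataFrame({'date': ['2017-03-22', '2016-03-22']})
df = convert_str_to_datetime(df, column='date', format='%Y-%m-%d')
print(df.dtypes)  # 'date' is now datetime64[ns]
```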
test
|
convert_datetime_to_str
|
Convert datetime column into string column
---
### Parameters
*mandatory :*
- column (*str*): name of the column to format
- format (*str*): format of the result values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- new_column (*str*): name of the output column. By default `column` is overwritten.
|
toucan_data_sdk/utils/postprocess/converter.py
|
def convert_datetime_to_str(df, *, column: str, format: str, new_column: str = None):
"""
Convert datetime column into string column
---
### Parameters
*mandatory :*
- column (*str*): name of the column to format
- format (*str*): format of the result values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- new_column (*str*): name of the output column. By default `column` is overwritten.
"""
new_column = new_column or column
df[new_column] = df[column].dt.strftime(format)
return df
|
def convert_datetime_to_str(df, *, column: str, format: str, new_column: str = None):
"""
Convert datetime column into string column
---
### Parameters
*mandatory :*
- column (*str*): name of the column to format
- format (*str*): format of the result values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- new_column (*str*): name of the output column. By default `column` is overwritten.
"""
new_column = new_column or column
df[new_column] = df[column].dt.strftime(format)
return df
|
[
"Convert",
"datetime",
"column",
"into",
"string",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/converter.py#L21-L39
|
[
"def",
"convert_datetime_to_str",
"(",
"df",
",",
"*",
",",
"column",
":",
"str",
",",
"format",
":",
"str",
",",
"new_column",
":",
"str",
"=",
"None",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
"[",
"new_column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"dt",
".",
"strftime",
"(",
"format",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
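A short usage sketch for `convert_datetime_to_str`, writing into a new column so the original is preserved; the import path is inferred from the record's `path` field.
```python
# Minimal sketch for `convert_datetime_to_str` (import path inferred from
# the record's `path` field).
import pandas as pd
from toucan_data_sdk.utils.postprocess.converter import convert_datetime_to_str

df = pd.DataFrame({'date': pd.to_datetime(['2017-03-22', '2016-03-22'])})
df = convert_datetime_to_str(df, column='date', format='%Y-%m',
                             new_column='month')
print(df)  # 'month' holds '2017-03' and '2016-03'; 'date' is unchanged
```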
test
|
change_date_format
|
Convert the format of a date
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to change the format
- `output_format` (*str*): format of the output values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- `input_format` (*str*): format of the input values (by default, the parser detects it)
- `new_column` (*str*): name of the output column (by default overwrite `column`)
- `new_time_zone` (*str*): name of new time zone (by default no time zone conversion is done)
---
### Example
**Input**
label | date
:------:|:----:
France | 2017-03-22
Europe | 2016-03-22
```cson
change_date_format:
column: 'date'
input_format: '%Y-%m-%d'
output_format: '%Y-%m'
```
Output:
label | date
:------:|:----:
France | 2017-03
Europe | 2016-03
|
toucan_data_sdk/utils/postprocess/converter.py
|
def change_date_format(
df, *,
column: str,
output_format: str,
input_format: str = None,
new_column: str = None,
new_time_zone=None
):
"""
Convert the format of a date
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to change the format
- `output_format` (*str*): format of the output values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- `input_format` (*str*): format of the input values (by default, the parser detects it)
- `new_column` (*str*): name of the output column (by default overwrite `column`)
- `new_time_zone` (*str*): name of new time zone (by default no time zone conversion is done)
---
### Example
**Input**
label | date
:------:|:----:
France | 2017-03-22
Europe | 2016-03-22
```cson
change_date_format:
column: 'date'
input_format: '%Y-%m-%d'
output_format: '%Y-%m'
```
Output:
label | date
:------:|:----:
France | 2017-03
Europe | 2016-03
"""
new_column = new_column or column
df[new_column] = (pd.to_datetime(df[column], format=input_format, utc=True)
.dt.tz_convert(new_time_zone)
.dt.strftime(output_format))
return df
|
def change_date_format(
df, *,
column: str,
output_format: str,
input_format: str = None,
new_column: str = None,
new_time_zone=None
):
"""
Convert the format of a date
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to change the format
- `output_format` (*str*): format of the output values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- `input_format` (*str*): format of the input values (by default, the parser detects it)
- `new_column` (*str*): name of the output column (by default overwrite `column`)
- `new_time_zone` (*str*): name of new time zone (by default no time zone conversion is done)
---
### Example
**Input**
label | date
:------:|:----:
France | 2017-03-22
Europe | 2016-03-22
```cson
change_date_format:
column: 'date'
input_format: '%Y-%m-%d'
output_format: '%Y-%m'
```
Output:
label | date
:------:|:----:
France | 2017-03
Europe | 2016-03
"""
new_column = new_column or column
df[new_column] = (pd.to_datetime(df[column], format=input_format, utc=True)
.dt.tz_convert(new_time_zone)
.dt.strftime(output_format))
return df
|
[
"Convert",
"the",
"format",
"of",
"a",
"date"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/converter.py#L42-L96
|
[
"def",
"change_date_format",
"(",
"df",
",",
"*",
",",
"column",
":",
"str",
",",
"output_format",
":",
"str",
",",
"input_format",
":",
"str",
"=",
"None",
",",
"new_column",
":",
"str",
"=",
"None",
",",
"new_time_zone",
"=",
"None",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
"[",
"new_column",
"]",
"=",
"(",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"column",
"]",
",",
"format",
"=",
"input_format",
",",
"utc",
"=",
"True",
")",
".",
"dt",
".",
"tz_convert",
"(",
"new_time_zone",
")",
".",
"dt",
".",
"strftime",
"(",
"output_format",
")",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
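A usage sketch for `change_date_format`, reproducing the docstring example; the import path is inferred from the record's `path` field.
```python
# Minimal sketch for `change_date_format` (import path inferred from the
# record's `path` field).
import pandas as pd
from toucan_data_sdk.utils.postprocess.converter import change_date_format

df = pd.DataFrame({'label': ['France', 'Europe'],
                   'date': ['2017-03-22', '2016-03-22']})
df = change_date_format(df, column='date',
                        input_format='%Y-%m-%d', output_format='%Y-%m')
print(df)  # 'date' now reads '2017-03' and '2016-03'
```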
test
|
cast
|
Convert column's type into type
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to convert
- `type` (*str*): output type. It can be :
- `"int"` : integer type
- `"float"` : general number type
- `"str"` : text type
*optional :*
- `new_column` (*str*): name of the output column.
By default the `column` argument is modified.
---
### Example
**Input**
| Column 1 | Column 2 | Column 3 |
|:-------:|:--------:|:--------:|
| 'one' | '2014' | 30.0 |
| 'two' | 2015.0 | '1' |
| 3.1 | 2016 | 450 |
```cson
postprocess: [
cast:
column: 'Column 1'
type: 'str'
cast:
column: 'Column 2'
type: 'int'
cast:
column: 'Column 3'
type: 'float'
]
```
**Output**
| Column 1 | Column 2 | Column 3 |
|:-------:|:------:|:--------:|
| 'one' | 2014 | 30.0 |
| 'two' | 2015 | 1.0 |
| '3.1' | 2016 | 450.0 |
|
toucan_data_sdk/utils/postprocess/converter.py
|
def cast(df, column: str, type: str, new_column=None):
"""
Convert column's type into type
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to convert
- `type` (*str*): output type. It can be :
- `"int"` : integer type
- `"float"` : general number type
- `"str"` : text type
*optional :*
- `new_column` (*str*): name of the output column.
By default the `column` argument is modified.
---
### Example
**Input**
| Column 1 | Column 2 | Column 3 |
|:-------:|:--------:|:--------:|
| 'one' | '2014' | 30.0 |
| 'two' | 2015.0 | '1' |
| 3.1 | 2016 | 450 |
```cson
postprocess: [
cast:
column: 'Column 1'
type: 'str'
cast:
column: 'Column 2'
type: 'int'
cast:
column: 'Column 3'
type: 'float'
]
```
**Output**
| Column 1 | Column 2 | Column 3 |
|:-------:|:------:|:--------:|
| 'one' | 2014 | 30.0 |
| 'two' | 2015 | 1.0 |
| '3.1' | 2016 | 450.0 |
"""
new_column = new_column or column
df[new_column] = df[column].astype(type)
return df
|
def cast(df, column: str, type: str, new_column=None):
"""
Convert column's type into type
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to convert
- `type` (*str*): output type. It can be :
- `"int"` : integer type
- `"float"` : general number type
- `"str"` : text type
*optional :*
- `new_column` (*str*): name of the output column.
By default the `column` argument is modified.
---
### Example
**Input**
| Column 1 | Column 2 | Column 3 |
|:-------:|:--------:|:--------:|
| 'one' | '2014' | 30.0 |
| 'two' | 2015.0 | '1' |
| 3.1 | 2016 | 450 |
```cson
postprocess: [
cast:
column: 'Column 1'
type: 'str'
cast:
column: 'Column 2'
type: 'int'
cast:
column: 'Column 3'
type: 'float'
]
```
**Output**
| Column 1 | Column 2 | Column 3 |
|:-------:|:------:|:--------:|
| 'one' | 2014 | 30.0 |
| 'two' | 2015 | 1.0 |
| '3.1' | 2016 | 450.0 |
"""
new_column = new_column or column
df[new_column] = df[column].astype(type)
return df
|
[
"Convert",
"column",
"s",
"type",
"into",
"type"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/converter.py#L99-L154
|
[
"def",
"cast",
"(",
"df",
",",
"column",
":",
"str",
",",
"type",
":",
"str",
",",
"new_column",
"=",
"None",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
"[",
"new_column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"astype",
"(",
"type",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
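A usage sketch for `cast`, reproducing the docstring's three conversions; the import path is inferred from the record's `path` field.
```python
# Minimal sketch for `cast` (import path inferred from the record's
# `path` field).
import pandas as pd
from toucan_data_sdk.utils.postprocess.converter import cast

df = pd.DataFrame({'Column 1': ['one', 'two', 3.1],
                   'Column 2': ['2014', 2015.0, 2016],
                   'Column 3': [30.0, '1', 450]})
df = cast(df, column='Column 1', type='str')
df = cast(df, column='Column 2', type='int')
df = cast(df, column='Column 3', type='float')
print(df.dtypes)  # object / int64 / float64
```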
test
|
compute_evolution_by_frequency
|
This function answers the question: how has a value changed on a weekly, monthly, yearly basis?
---
### Parameters
*mandatory :*
- `id_cols` (*list*): name of the columns used to create each group.
- `date_col` (*str or dict*): either directly the name of the column containing the date or a dictionary with:
- `selector` (*str*): the name of the column
- `format` (*str*): the format of the date (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `value_col` (*str*): name of the column containing the value to compare.
*optional :*
- `freq` (*int/pd.DateOffset/pd.Series/dict*): the frequency at which we calculate evolutions
- `method` (*str*): either `"abs"` for absolute values or `"pct"` for the evolution in percentage of the previous value.
- `offseted_suffix` (*str*): suffix of the offseted column. By default, `"_offseted"`.
- `evolution_col_name` (*str*): name given to the evolution column. By default, `"evolution_computed"`.
- `missing_date_as_zero` (*boolean*): add missing dates with a zero value.
- `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`.
- `format` (*str*): `'df'` # Do not change it !!!
---
### Example
**Input**
| id_cols | value_col | date_col|
|:---------:|:------------:|:----------:|
| A | 20 | 2010|
| | 7 | 2011|
| B | 200 | 2010|
| | 220 | 2011|
| C | 100 | 2011|
```cson
compute_evolution_by_frequency:
id_cols: "id_cols"
date_col: "date_col"
value_col: "value_col"
```
**Output**
| id_cols | value_col | date_col| evolution|
|:---------:|:------------:|:----------:|:---------:|
| A | 20 | 2010| null|
| | 7 | 2011| -13|
| B | 200 | 2010| null|
| | 220 | 2011| 20|
| C | 100 | 2011| null|
|
toucan_data_sdk/utils/generic/compute_evolution.py
|
def compute_evolution_by_frequency(
df,
id_cols: List[str],
date_col: Union[str, Dict[str, str]],
value_col: str,
freq=1,
method: str = 'abs',
format: str = 'column',
offseted_suffix: str = '_offseted',
evolution_col_name: str = 'evolution_computed',
missing_date_as_zero: bool = False,
raise_duplicate_error: bool = True
):
"""
This function answers the question: how has a value changed on a weekly, monthly, yearly basis?
---
### Parameters
*mandatory :*
- `id_cols` (*list*): name of the columns used to create each group.
- `date_col` (*str or dict*): either directly the name of the column containing the date or a dictionary with:
- `selector` (*str*): the name of the column
- `format` (*str*): the format of the date (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `value_col` (*str*): name of the column containing the value to compare.
*optional :*
- `freq` (*int/pd.DateOffset/pd.Series/dict*): the frequency at which we calculate evolutions
- `method` (*str*): either `"abs"` for absolute values or `"pct"` for the evolution in percentage of the previous value.
- `offseted_suffix` (*str*): suffix of the offseted column. By default, `"_offseted"`.
- `evolution_col_name` (*str*): name given to the evolution column. By default, `"evolution_computed"`.
- `missing_date_as_zero` (*boolean*): add missing dates with a zero value.
- `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`.
- `format` (*str*): `'df'` # Do not change it !!!
---
### Example
**Input**
| id_cols | value_col | date_col|
|:---------:|:------------:|:----------:|
| A | 20 | 2010|
| | 7 | 2011|
| B | 200 | 2010|
| | 220 | 2011|
| C | 100 | 2011|
```cson
compute_evolution_by_frequency:
id_cols: "id_cols"
date_col: "date_col"
value_col: "value_col"
```
**Output**
| id_cols | value_col | date_col| evolution|
|:---------:|:------------:|:----------:|:---------:|
| A | 20 | 2010| null|
| | 7 | 2011| -13|
| B | 200 | 2010| null|
| | 220 | 2011| 20|
| C | 100 | 2011| null|
"""
if missing_date_as_zero:
how = 'outer'
fillna = 0
else:
how = 'left'
fillna = None
return __compute_evolution(
df=df,
id_cols=id_cols,
value_col=value_col,
date_col=date_col,
freq=freq,
method=method,
format=format,
offseted_suffix=offseted_suffix,
evolution_col_name=evolution_col_name,
how=how,
fillna=fillna,
raise_duplicate_error=raise_duplicate_error
)
|
def compute_evolution_by_frequency(
df,
id_cols: List[str],
date_col: Union[str, Dict[str, str]],
value_col: str,
freq=1,
method: str = 'abs',
format: str = 'column',
offseted_suffix: str = '_offseted',
evolution_col_name: str = 'evolution_computed',
missing_date_as_zero: bool = False,
raise_duplicate_error: bool = True
):
"""
This function answers the question: how has a value changed on a weekly, monthly, yearly basis?
---
### Parameters
*mandatory :*
- `id_cols` (*list*): name of the columns used to create each group.
- `date_col` (*str or dict*): either directly the name of the column containing the date or a dictionary with:
- `selector` (*str*): the name of the column
- `format` (*str*): the format of the date (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `value_col` (*str*): name of the column containing the value to compare.
*optional :*
- `freq` (*int/pd.DateOffset/pd.Series/dict*): the frequency at which we calculate evolutions
- `method` (*str*): either `"abs"` for absolute values or `"pct"` for the evolution in percentage of the previous value.
- `offseted_suffix` (*str*): suffix of the offseted column. By default, `"_offseted"`.
- `evolution_col_name` (*str*): name given to the evolution column. By default, `"evolution_computed"`.
- `missing_date_as_zero` (*boolean*): add missing dates with a zero value.
- `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`.
- `format` (*str*): `'df'` # Do not change it !!!
---
### Example
**Input**
| id_cols | value_col | date_col|
|:---------:|:------------:|:----------:|
| A | 20 | 2010|
| | 7 | 2011|
| B | 200 | 2010|
| | 220 | 2011|
| C | 100 | 2011|
```cson
compute_evolution_by_frequency:
id_cols: "id_cols"
date_col: "date_col"
value_col: "value_col"
```
**Output**
| id_cols | value_col | date_col| evolution|
|:---------:|:------------:|:----------:|:---------:|
| A | 20 | 2010| null|
| | 7 | 2011| -13|
| B | 200 | 2010| null|
| | 220 | 2011| 20|
| C | 100 | 2011| null|
"""
if missing_date_as_zero:
how = 'outer'
fillna = 0
else:
how = 'left'
fillna = None
return __compute_evolution(
df=df,
id_cols=id_cols,
value_col=value_col,
date_col=date_col,
freq=freq,
method=method,
format=format,
offseted_suffix=offseted_suffix,
evolution_col_name=evolution_col_name,
how=how,
fillna=fillna,
raise_duplicate_error=raise_duplicate_error
)
|
[
"This",
"function",
"answers",
"the",
"question",
":",
"how",
"has",
"a",
"value",
"changed",
"on",
"a",
"weekly",
"monthly",
"yearly",
"basis",
"?"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/compute_evolution.py#L10-L98
|
[
"def",
"compute_evolution_by_frequency",
"(",
"df",
",",
"id_cols",
":",
"List",
"[",
"str",
"]",
",",
"date_col",
":",
"Union",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
",",
"value_col",
":",
"str",
",",
"freq",
"=",
"1",
",",
"method",
":",
"str",
"=",
"'abs'",
",",
"format",
":",
"str",
"=",
"'column'",
",",
"offseted_suffix",
":",
"str",
"=",
"'_offseted'",
",",
"evolution_col_name",
":",
"str",
"=",
"'evolution_computed'",
",",
"missing_date_as_zero",
":",
"bool",
"=",
"False",
",",
"raise_duplicate_error",
":",
"bool",
"=",
"True",
")",
":",
"if",
"missing_date_as_zero",
":",
"how",
"=",
"'outer'",
"fillna",
"=",
"0",
"else",
":",
"how",
"=",
"'left'",
"fillna",
"=",
"None",
"return",
"__compute_evolution",
"(",
"df",
"=",
"df",
",",
"id_cols",
"=",
"id_cols",
",",
"value_col",
"=",
"value_col",
",",
"date_col",
"=",
"date_col",
",",
"freq",
"=",
"freq",
",",
"method",
"=",
"method",
",",
"format",
"=",
"format",
",",
"offseted_suffix",
"=",
"offseted_suffix",
",",
"evolution_col_name",
"=",
"evolution_col_name",
",",
"how",
"=",
"how",
",",
"fillna",
"=",
"fillna",
",",
"raise_duplicate_error",
"=",
"raise_duplicate_error",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
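A hedged sketch for `compute_evolution_by_frequency`, reproducing the docstring example; the import path is inferred from the record's `path` field, and `format='df'` is assumed to return the whole DataFrame, as the docstring's "Do not change it" note suggests.
```python
# Minimal sketch for `compute_evolution_by_frequency` (import path inferred
# from the record's `path` field).
import pandas as pd
from toucan_data_sdk.utils.generic.compute_evolution import (
    compute_evolution_by_frequency,
)

df = pd.DataFrame({
    'id_cols': ['A', 'A', 'B', 'B', 'C'],
    'date_col': [2010, 2011, 2010, 2011, 2011],
    'value_col': [20, 7, 200, 220, 100],
})

# format='df' (assumed, per the docstring) should return the DataFrame with
# an 'evolution_computed' column; e.g. A/2011 gets 7 - 20 = -13.
result = compute_evolution_by_frequency(
    df, id_cols=['id_cols'], date_col='date_col', value_col='value_col',
    format='df')
print(result)
```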
test
|
compute_evolution_by_criteria
|
This function answers the question: how has a value changed compared to a specific value?
---
### Parameters
*mandatory :*
- `id_cols` (*list*): columns used to create each group
- `value_col` (*str*): name of the column containing the value to compare
- `compare_to` (*str*): the query identifying a specific set of values for comparison.
*optional :*
- `method` (*str*): either `"abs"` for absolute values or `"pct"` for the evolution in percentage of the previous value.
- `offseted_suffix` (*str*): suffix of the offseted column. By default, `"_offseted"`.
- `evolution_col_name` (*str*): name given to the evolution column. By default, `"evolution_computed"`.
- `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`.
- `format` (*str*): `'df'` # Do not change it !!!
---
### Example
**Input**
| id_cols | value_col | month|
|:---------:|:------------:|:-------:|
| A | 100 | 1|
| | 250 | 12|
| B | 300 | 1|
| | 200 | 12|
```cson
compute_evolution_by_criteria:
id_cols: "id_cols"
value_col: "value_col"
compare_to: "month==12"
```
**Output**
| id_cols | value_col | month| value_offseted | evolution_computed|
|:---------:|:------------:|:-------:|:----------------:|:-----------------:|
| A | 100 | 1| 250| -150|
| | 250 | 12| 250| 0|
| B | 300 | 1| 200| 100|
| | 200 | 12| 200| 0|
|
toucan_data_sdk/utils/generic/compute_evolution.py
|
def compute_evolution_by_criteria(
df,
id_cols: List[str],
value_col: str,
compare_to: str,
method: str = 'abs',
format: str = 'column',
offseted_suffix: str = '_offseted',
evolution_col_name: str = 'evolution_computed',
raise_duplicate_error: bool = True
):
"""
This function answers the question: how has a value changed compared to a specific value?
---
### Parameters
*mandatory :*
- `id_cols` (*list*): columns used to create each group
- `value_col` (*str*): name of the column containing the value to compare
- `compare_to` (*str*): the query identifying a specific set of values for comparison.
*optional :*
- `method` (*str*): either `"abs"` for absolute values or `"pct"` for the evolution in percentage of the previous value.
- `offseted_suffix` (*str*): suffix of the offseted column. By default, `"_offseted"`.
- `evolution_col_name` (*str*): name given to the evolution column. By default, `"evolution_computed"`.
- `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`.
- `format` (*str*): `'df'` # Do not change it !!!
---
### Example
**Input**
| id_cols | value_col | month|
|:---------:|:------------:|:-------:|
| A | 100 | 1|
| | 250 | 12|
| B | 300 | 1|
| | 200 | 12|
```cson
compute_evolution_by_criteria:
id_cols: "id_cols"
value_col: "value_col"
compare_to: "month==12"
```
**Output**
| id_cols | value_col | month| value_offseted | evolution_computed|
|:---------:|:------------:|:-------:|:----------------:|:-----------------:|
| A | 100 | 1| 250| -150|
| | 250 | 12| 250| 0|
| B | 300 | 1| 200| 100|
| | 200 | 12| 200| 0|
"""
return __compute_evolution(**locals())
|
def compute_evolution_by_criteria(
df,
id_cols: List[str],
value_col: str,
compare_to: str,
method: str = 'abs',
format: str = 'column',
offseted_suffix: str = '_offseted',
evolution_col_name: str = 'evolution_computed',
raise_duplicate_error: bool = True
):
"""
This function answers the question: how has a value changed compared to a specific value?
---
### Parameters
*mandatory :*
- `id_cols` (*list*): columns used to create each group
- `value_col` (*str*): name of the column containing the value to compare
- `compare_to` (*str*): the query identifying a specific set of values for comparison.
*optional :*
- `method` (*str*): either `"abs"` for absolute values or `"pct"` for the evolution in percentage of the previous value.
- `offseted_suffix` (*str*): suffix of the offseted column. By default, `"_offseted"`.
- `evolution_col_name` (*str*): name given to the evolution column. By default, `"evolution_computed"`.
- `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`.
- `format` (*str*): `'df'` # Do not change it !!!
---
### Example
**Input**
| id_cols | value_col | month|
|:---------:|:------------:|:-------:|
| A | 100 | 1|
| | 250 | 12|
| B | 300 | 1|
| | 200 | 12|
```cson
compute_evolution_by_criteria:
id_cols: "id_cols"
value_col: "value_col"
compare_to: "month==12"
```
**Output**
| id_cols | value_col | month| value_offseted | evolution_computed|
|:---------:|:------------:|:-------:|:----------------:|:-----------------:|
| A | 100 | 1| 250| -150|
| | 250 | 12| 250| 0|
| B | 300 | 1| 200| 100|
| | 200 | 12| 200| 0|
"""
return __compute_evolution(**locals())
|
[
"This",
"function",
"answers",
"the",
"question",
":",
"how",
"has",
"a",
"value",
"changed",
"compared",
"to",
"a",
"specific",
"value",
"?"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/compute_evolution.py#L101-L160
|
[
"def",
"compute_evolution_by_criteria",
"(",
"df",
",",
"id_cols",
":",
"List",
"[",
"str",
"]",
",",
"value_col",
":",
"str",
",",
"compare_to",
":",
"str",
",",
"method",
":",
"str",
"=",
"'abs'",
",",
"format",
":",
"str",
"=",
"'column'",
",",
"offseted_suffix",
":",
"str",
"=",
"'_offseted'",
",",
"evolution_col_name",
":",
"str",
"=",
"'evolution_computed'",
",",
"raise_duplicate_error",
":",
"bool",
"=",
"True",
")",
":",
"return",
"__compute_evolution",
"(",
"*",
"*",
"locals",
"(",
")",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
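A hedged sketch for `compute_evolution_by_criteria`, reproducing the docstring example; the import path is inferred from the record's `path` field, with the same `format='df'` assumption as above.
```python
# Minimal sketch for `compute_evolution_by_criteria` (import path inferred
# from the record's `path` field).
import pandas as pd
from toucan_data_sdk.utils.generic.compute_evolution import (
    compute_evolution_by_criteria,
)

df = pd.DataFrame({
    'id_cols': ['A', 'A', 'B', 'B'],
    'month': [1, 12, 1, 12],
    'value_col': [100, 250, 300, 200],
})

# Every row is compared to the month==12 row of its group, so A/month 1
# gets 100 - 250 = -150 in 'evolution_computed'.
result = compute_evolution_by_criteria(
    df, id_cols=['id_cols'], value_col='value_col', compare_to='month==12',
    format='df')
print(result)
```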
test
|
__compute_evolution
|
Compute an evolution column:
- against a period distant from a fixed frequency.
- against a part of the df
Unfortunately, pandas doesn't allow .change() and .pct_change() to be
executed with a MultiIndex.
Args:
df (pd.DataFrame):
id_cols (list(str)):
value_col (str):
date_col (str/dict): default None
freq (int/pd.DateOffset/pd.Series): default 1
compare_to (str): default None
method (str): default ``'abs'`` can also be ``'pct'``
format(str): default 'column' can also be 'df'
offseted_suffix(str): default '_offseted'
evolution_col_name(str): default 'evolution_computed'
how(str): default 'left'
fillna(str/int): default None
|
toucan_data_sdk/utils/generic/compute_evolution.py
|
def __compute_evolution(
df,
id_cols,
value_col,
date_col=None,
freq=1,
compare_to=None,
method='abs',
format='column',
offseted_suffix='_offseted',
evolution_col_name='evolution_computed',
how='left',
fillna=None,
raise_duplicate_error=True
):
"""
Compute an evolution column:
- against a period distant from a fixed frequency.
- against a part of the df
Unfortunately, pandas doesn't allow .change() and .pct_change() to be
executed with a MultiIndex.
Args:
df (pd.DataFrame):
id_cols (list(str)):
value_col (str):
date_col (str/dict): default None
freq (int/pd.DateOffset/pd.Series): default 1
compare_to (str): default None
method (str): default ``'abs'`` can also be ``'pct'``
format(str): default 'column' can also be 'df'
offseted_suffix(str): default '_offseted'
evolution_col_name(str): default 'evolution_computed'
how(str): default 'left'
fillna(str/int): default None
"""
if date_col is not None:
is_date_to_format = isinstance(date_col, dict) or (df[date_col].dtype == np.object)
if is_date_to_format:
if isinstance(date_col, dict):
date_format = date_col.get('format', None)
date_col = date_col['selector']
else:
date_format = None
df['_'+date_col + '_copy_'] = pd.to_datetime(df[date_col], format=date_format)
date_col = '_'+date_col + '_copy_'
is_freq_dict = isinstance(freq, dict)
if is_freq_dict:
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
check_params_columns_duplicate(id_cols + [value_col, date_col])
# create df_offseted
group_cols = id_cols + [date_col]
df_offseted = df[group_cols + [value_col]].copy()
df_offseted[date_col] += freq
df_with_offseted_values = apply_merge(
df, df_offseted, group_cols, how, offseted_suffix,
raise_duplicate_error
)
if is_date_to_format:
del df_with_offseted_values[date_col]
elif compare_to is not None:
# create df_offseted
check_params_columns_duplicate(id_cols + [value_col])
group_cols = id_cols
df_offseted = df.query(compare_to).copy()
df_offseted = df_offseted[group_cols + [value_col]]
df_with_offseted_values = apply_merge(
df, df_offseted, group_cols, how, offseted_suffix,
raise_duplicate_error
)
apply_fillna(df_with_offseted_values, value_col, offseted_suffix, fillna)
apply_method(df_with_offseted_values, evolution_col_name, value_col, offseted_suffix, method)
return apply_format(df_with_offseted_values, evolution_col_name, format)
|
def __compute_evolution(
df,
id_cols,
value_col,
date_col=None,
freq=1,
compare_to=None,
method='abs',
format='column',
offseted_suffix='_offseted',
evolution_col_name='evolution_computed',
how='left',
fillna=None,
raise_duplicate_error=True
):
"""
Compute an evolution column:
- against a period distant from a fixed frequency.
- against a part of the df
Unfortunately, pandas doesn't allow .change() and .pct_change() to be
executed with a MultiIndex.
Args:
df (pd.DataFrame):
id_cols (list(str)):
value_col (str):
date_col (str/dict): default None
freq (int/pd.DateOffset/pd.Series): default 1
compare_to (str): default None
method (str): default ``'abs'`` can also be ``'pct'``
format(str): default 'column' can also be 'df'
offseted_suffix(str): default '_offseted'
evolution_col_name(str): default 'evolution_computed'
how(str): default 'left'
fillna(str/int): default None
"""
if date_col is not None:
is_date_to_format = isinstance(date_col, dict) or (df[date_col].dtype == np.object)
if is_date_to_format:
if isinstance(date_col, dict):
date_format = date_col.get('format', None)
date_col = date_col['selector']
else:
date_format = None
df['_'+date_col + '_copy_'] = pd.to_datetime(df[date_col], format=date_format)
date_col = '_'+date_col + '_copy_'
is_freq_dict = isinstance(freq, dict)
if is_freq_dict:
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
check_params_columns_duplicate(id_cols + [value_col, date_col])
# create df_offseted
group_cols = id_cols + [date_col]
df_offseted = df[group_cols + [value_col]].copy()
df_offseted[date_col] += freq
df_with_offseted_values = apply_merge(
df, df_offseted, group_cols, how, offseted_suffix,
raise_duplicate_error
)
if is_date_to_format:
del df_with_offseted_values[date_col]
elif compare_to is not None:
# create df_offseted
check_params_columns_duplicate(id_cols + [value_col])
group_cols = id_cols
df_offseted = df.query(compare_to).copy()
df_offseted = df_offseted[group_cols + [value_col]]
df_with_offseted_values = apply_merge(
df, df_offseted, group_cols, how, offseted_suffix,
raise_duplicate_error
)
apply_fillna(df_with_offseted_values, value_col, offseted_suffix, fillna)
apply_method(df_with_offseted_values, evolution_col_name, value_col, offseted_suffix, method)
return apply_format(df_with_offseted_values, evolution_col_name, format)
|
[
"Compute",
"an",
"evolution",
"column",
":",
"-",
"against",
"a",
"period",
"distant",
"from",
"a",
"fixed",
"frequency",
".",
"-",
"against",
"a",
"part",
"of",
"the",
"df"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/compute_evolution.py#L167-L246
|
[
"def",
"__compute_evolution",
"(",
"df",
",",
"id_cols",
",",
"value_col",
",",
"date_col",
"=",
"None",
",",
"freq",
"=",
"1",
",",
"compare_to",
"=",
"None",
",",
"method",
"=",
"'abs'",
",",
"format",
"=",
"'column'",
",",
"offseted_suffix",
"=",
"'_offseted'",
",",
"evolution_col_name",
"=",
"'evolution_computed'",
",",
"how",
"=",
"'left'",
",",
"fillna",
"=",
"None",
",",
"raise_duplicate_error",
"=",
"True",
")",
":",
"if",
"date_col",
"is",
"not",
"None",
":",
"is_date_to_format",
"=",
"isinstance",
"(",
"date_col",
",",
"dict",
")",
"or",
"(",
"df",
"[",
"date_col",
"]",
".",
"dtype",
"==",
"np",
".",
"object",
")",
"if",
"is_date_to_format",
":",
"if",
"isinstance",
"(",
"date_col",
",",
"dict",
")",
":",
"date_format",
"=",
"date_col",
".",
"get",
"(",
"'format'",
",",
"None",
")",
"date_col",
"=",
"date_col",
"[",
"'selector'",
"]",
"else",
":",
"date_format",
"=",
"None",
"df",
"[",
"'_'",
"+",
"date_col",
"+",
"'_copy_'",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"date_col",
"]",
",",
"format",
"=",
"date_format",
")",
"date_col",
"=",
"'_'",
"+",
"date_col",
"+",
"'_copy_'",
"is_freq_dict",
"=",
"isinstance",
"(",
"freq",
",",
"dict",
")",
"if",
"is_freq_dict",
":",
"freq",
"=",
"pd",
".",
"DateOffset",
"(",
"*",
"*",
"{",
"k",
":",
"int",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"freq",
".",
"items",
"(",
")",
"}",
")",
"check_params_columns_duplicate",
"(",
"id_cols",
"+",
"[",
"value_col",
",",
"date_col",
"]",
")",
"# create df_offseted",
"group_cols",
"=",
"id_cols",
"+",
"[",
"date_col",
"]",
"df_offseted",
"=",
"df",
"[",
"group_cols",
"+",
"[",
"value_col",
"]",
"]",
".",
"copy",
"(",
")",
"df_offseted",
"[",
"date_col",
"]",
"+=",
"freq",
"df_with_offseted_values",
"=",
"apply_merge",
"(",
"df",
",",
"df_offseted",
",",
"group_cols",
",",
"how",
",",
"offseted_suffix",
",",
"raise_duplicate_error",
")",
"if",
"is_date_to_format",
":",
"del",
"df_with_offseted_values",
"[",
"date_col",
"]",
"elif",
"compare_to",
"is",
"not",
"None",
":",
"# create df_offseted",
"check_params_columns_duplicate",
"(",
"id_cols",
"+",
"[",
"value_col",
"]",
")",
"group_cols",
"=",
"id_cols",
"df_offseted",
"=",
"df",
".",
"query",
"(",
"compare_to",
")",
".",
"copy",
"(",
")",
"df_offseted",
"=",
"df_offseted",
"[",
"group_cols",
"+",
"[",
"value_col",
"]",
"]",
"df_with_offseted_values",
"=",
"apply_merge",
"(",
"df",
",",
"df_offseted",
",",
"group_cols",
",",
"how",
",",
"offseted_suffix",
",",
"raise_duplicate_error",
")",
"apply_fillna",
"(",
"df_with_offseted_values",
",",
"value_col",
",",
"offseted_suffix",
",",
"fillna",
")",
"apply_method",
"(",
"df_with_offseted_values",
",",
"evolution_col_name",
",",
"value_col",
",",
"offseted_suffix",
",",
"method",
")",
"return",
"apply_format",
"(",
"df_with_offseted_values",
",",
"evolution_col_name",
",",
"format",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
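Since `__compute_evolution` is a private helper, a standalone pandas sketch of its core offset-and-merge trick (frequency branch) may be clearer than a direct call; this reproduces the -13 evolution from the frequency example above.
```python
# Standalone sketch of the offset-and-merge idea used by the frequency
# branch of __compute_evolution: shift the date key forward by the
# frequency, then left-merge so each row faces its past value.
import pandas as pd

df = pd.DataFrame({'id': ['A', 'A', 'B', 'B'],
                   'year': [2010, 2011, 2010, 2011],
                   'value': [20, 7, 200, 220]})

offseted = df.copy()
offseted['year'] += 1  # freq = 1 period
merged = df.merge(offseted, on=['id', 'year'],
                  how='left', suffixes=('', '_offseted'))
merged['evolution'] = merged['value'] - merged['value_offseted']  # 'abs'
print(merged)  # 2010 rows get NaN (no previous period); A/2011 is -13
```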
test
|
rank
|
This function creates rank columns based on numeric values to be ranked.
---
### Parameters
*mandatory :*
- `value_cols` (*list*): name(s) of the column(s) containing the values to rank
*optional :*
- `group_cols` (*list*): name(s) of the column(s) used to
create each group inside which independent ranking needs to be applied
- `rank_cols_names` (*list*): the names of the added ranking columns.
If not filled, the ranking will be named after the value_cols with a '_rank' suffix
- `method` (*str*): method to use when encountering equal values:
- `'min'` (default): lowest rank in group
- `'max'`: highest rank in group
- `'average'`: average rank of group
- `'first'`: ranks assigned in order the values appear in the series
- `'dense'`: like 'min', but rank always increases by 1 between groups
- `ascending` (*boolean*): whether the rank should be determined based on
ascending (default) or descending order
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
| :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 50 | 6 |
```cson
rank :
value_cols: 'VALUE_1'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 | VALUE_1_rank |
| :---: | :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 | 1 |
| A | 2017 | 20 | 1 | 3 |
| A | 2018 | 10 | 5 | 1 |
| A | 2018 | 30 | 4 | 4 |
| B | 2017 | 60 | 4 | 8 |
| B | 2017 | 40 | 3 | 5 |
| B | 2018 | 50 | 7 | 6 |
| B | 2018 | 50 | 6 | 6 |
|
toucan_data_sdk/utils/postprocess/rank.py
|
def rank(
df,
value_cols: Union[str, List[str]],
group_cols: List[str] = None,
rank_cols_names: List[str] = None,
method='min',
ascending: bool = True
):
"""
This function creates rank columns based on numeric values to be ranked.
---
### Parameters
*mandatory :*
- `value_cols` (*list*): name(s) of the column(s) containing the values to rank
*optional :*
- `group_cols` (*list*): name(s) of the column(s) used to
create each group inside which independent ranking needs to be applied
- `rank_cols_names` (*list*): the names of the added ranking columns.
If not filled, the ranking will be named after the value_cols with a '_rank' suffix
- `method` (*str*): method to use when encountering equal values:
- `'min'` (default): lowest rank in group
- `'max'`: highest rank in group
- `'average'`: average rank of group
- `'first'`: ranks assigned in order the values appear in the series
- `'dense'`: like 'min', but rank always increases by 1 between groups
- `ascending` (*boolean*): whether the rank should be determined based on
ascending (default) or descending order
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
| :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 50 | 6 |
```cson
rank :
value_cols: 'VALUE_1'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 | VALUE_1_rank |
| :---: | :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 | 1 |
| A | 2017 | 20 | 1 | 3 |
| A | 2018 | 10 | 5 | 1 |
| A | 2018 | 30 | 4 | 4 |
| B | 2017 | 60 | 4 | 8 |
| B | 2017 | 40 | 3 | 5 |
| B | 2018 | 50 | 7 | 6 |
| B | 2018 | 50 | 6 | 6 |
"""
value_cols = [value_cols] if not isinstance(value_cols, list) else value_cols
for col in value_cols:
if not np.issubdtype(df[col].dtype, np.number):
raise TypeError(col + " specified in value_cols must be of numeric type")
if rank_cols_names is None:
rank_cols_names = [x + '_rank' for x in value_cols]
if group_cols is None:
df[rank_cols_names] = df[value_cols].rank(method=method, ascending=ascending)
else:
df[rank_cols_names] = (df.groupby(group_cols)[value_cols]
.rank(method=method, ascending=ascending))
if method != 'average':
df[rank_cols_names] = df[rank_cols_names].astype('int')
return df
|
def rank(
df,
value_cols: Union[str, List[str]],
group_cols: List[str] = None,
rank_cols_names: List[str] = None,
method='min',
ascending: bool = True
):
"""
This function creates rank columns based on numeric values to be ranked.
---
### Parameters
*mandatory :*
- `value_cols` (*list*): name(s) of the column(s) containing the values to rank
*optional :*
- `group_cols` (*list*): name(s) of the column(s) used to
create each group inside which independent ranking needs to be applied
- `rank_cols_names` (*list*): the names of the added ranking columns.
If not filled, the ranking will be named after the value_cols with a '_rank' suffix
- `method` (*str*): method to use when encountering equal values:
- `'min'` (default): lowest rank in group
- `'max'`: highest rank in group
- `'average'`: average rank of group
- `'first'`: ranks assigned in order the values appear in the series
- `'dense'`: like 'min', but rank always increases by 1 between groups
- `ascending` (*boolean*): whether the rank should be determined based on
ascending (default) or descending order
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
| :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 50 | 6 |
```cson
rank :
value_cols: 'VALUE_1'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 | VALUE_1_rank |
| :---: | :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 | 1 |
| A | 2017 | 20 | 1 | 3 |
| A | 2018 | 10 | 5 | 1 |
| A | 2018 | 30 | 4 | 4 |
| B | 2017 | 60 | 4 | 8 |
| B | 2017 | 40 | 3 | 5 |
| B | 2018 | 50 | 7 | 6 |
| B | 2018 | 50 | 6 | 6 |
"""
value_cols = [value_cols] if not isinstance(value_cols, list) else value_cols
for col in value_cols:
if not np.issubdtype(df[col].dtype, np.number):
raise TypeError(col + " specified in value_cols must be of numeric type")
if rank_cols_names is None:
rank_cols_names = [x + '_rank' for x in value_cols]
if group_cols is None:
df[rank_cols_names] = df[value_cols].rank(method=method, ascending=ascending)
else:
df[rank_cols_names] = (df.groupby(group_cols)[value_cols]
.rank(method=method, ascending=ascending))
if method != 'average':
df[rank_cols_names] = df[rank_cols_names].astype('int')
return df
|
[
"This",
"function",
"creates",
"rank",
"columns",
"based",
"on",
"numeric",
"values",
"to",
"be",
"ranked",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/rank.py#L6-L91
|
[
"def",
"rank",
"(",
"df",
",",
"value_cols",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
",",
"group_cols",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"rank_cols_names",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"method",
"=",
"'min'",
",",
"ascending",
":",
"bool",
"=",
"True",
")",
":",
"value_cols",
"=",
"[",
"value_cols",
"]",
"if",
"not",
"isinstance",
"(",
"value_cols",
",",
"list",
")",
"else",
"value_cols",
"for",
"col",
"in",
"value_cols",
":",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"df",
"[",
"col",
"]",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"raise",
"TypeError",
"(",
"col",
"+",
"\" specified in value_cols must be of numeric type\"",
")",
"if",
"rank_cols_names",
"is",
"None",
":",
"rank_cols_names",
"=",
"[",
"x",
"+",
"'_rank'",
"for",
"x",
"in",
"value_cols",
"]",
"if",
"group_cols",
"is",
"None",
":",
"df",
"[",
"rank_cols_names",
"]",
"=",
"df",
"[",
"value_cols",
"]",
".",
"rank",
"(",
"method",
"=",
"method",
",",
"ascending",
"=",
"ascending",
")",
"else",
":",
"df",
"[",
"rank_cols_names",
"]",
"=",
"(",
"df",
".",
"groupby",
"(",
"group_cols",
")",
"[",
"value_cols",
"]",
".",
"rank",
"(",
"method",
"=",
"method",
",",
"ascending",
"=",
"ascending",
")",
")",
"if",
"method",
"!=",
"'average'",
":",
"df",
"[",
"rank_cols_names",
"]",
"=",
"df",
"[",
"rank_cols_names",
"]",
".",
"astype",
"(",
"'int'",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
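A usage sketch for `rank`, on a trimmed version of the docstring's input (VALUE_2 omitted for brevity); the import path is inferred from the record's `path` field.
```python
# Minimal usage sketch for `rank` (import path inferred from the record's
# `path` field: toucan_data_sdk/utils/postprocess/rank.py).
import pandas as pd
from toucan_data_sdk.utils.postprocess.rank import rank

df = pd.DataFrame({
    'ENTITY': ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'B'],
    'YEAR': [2017, 2017, 2018, 2018, 2017, 2017, 2018, 2018],
    'VALUE_1': [10, 20, 10, 30, 60, 40, 50, 50],
})

# Global ranking with the default method='min' (ties share the lowest rank),
# adding a 'VALUE_1_rank' column as in the record's example.
df = rank(df, value_cols='VALUE_1')
# Independent ranking inside each (ENTITY, YEAR) group, explicitly named.
df = rank(df, value_cols='VALUE_1', group_cols=['ENTITY', 'YEAR'],
          rank_cols_names=['VALUE_1_group_rank'])
print(df)
```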
test
|
waterfall
|
Return a line for each bar of a waterfall chart, totals, groups, subgroups.
Compute the variation and variation rate for each line.
---
### Parameters
*mandatory :*
- `date` (*str*): name of the column that identifies the period of each line
- `value` (*str*): name of the column that contains the value for each line
- `start` (*dict*):
- `label`: text displayed under the first master column
- `id`: value in the date column that identifies lines for the first period
- `end` (*dict*):
- `label`: text displayed under the last master column
- `id`: value in the date column that identifies lines for the second period
*optional :*
- `upperGroup` (*dict*):
- `id`: name of the column that contains upperGroups unique IDs
- `label`: not required, text displayed under each upperGroups bars,
using ID when it's absent
- `groupsOrder`: not required, order of upperGroups
- `insideGroup` (*dict*):
- `id`: name of the column that contains insideGroups unique IDs
- `label`: not required, text displayed under each insideGroups bars,
using ID when it's absent
- `groupsOrder`: not required, order of insideGroups
- `filters` (*list*): columns to filter on
---
### Example
**Input**
| product_id | played | date | ord | category_id | category_name |
|:------------:|:--------:|:------:|:-----:|:-------------:|:---------------:|
| super clap | 12 | t1 | 1 | clap | Clap |
| clap clap | 1 | t1 | 10 | clap | Clap |
| tac | 1 | t1 | 1 | snare | Snare |
| super clap | 10 | t2 | 1 | clap | Clap |
| tac | 100 | t2 | 1 | snare | Snare |
| bom | 1 | t2 | 1 | tom | Tom |
```cson
waterfall:
upperGroup:
id: 'category_id'
label: 'category_name'
insideGroup:
id: 'product_id'
groupsOrder: 'ord'
date: 'date'
value: 'played'
start:
label: 'Trimestre 1'
id: 't1'
end:
label: 'Trimester 2'
id: 't2'
```
**Output**
| value | label | variation | groups | type | order |
|:-------:|:-----------:|:-----------:|:--------:|:------:|:-------:|
| 14 | Trimestre 1 | NaN | NaN | NaN | NaN |
| -3 | Clap | -0.230769 | clap | parent | NaN |
| -2 | super clap | -0.166667 | clap | child | 1 |
| -1 | clap clap | -1 | clap | child | 10 |
| 99 | Snare | 99 | snare | parent | NaN |
| 99 | tac | 99 | snare | child | 1 |
| 1 | Tom | inf | tom | parent | NaN |
| 1 | bom | inf | tom | child | 1 |
| 111 | Trimester 2 | NaN | NaN | NaN | NaN |
|
toucan_data_sdk/utils/postprocess/waterfall.py
|
def waterfall(
df,
date: str,
value: str,
start: Dict[str, str],
end: Dict[str, str],
upperGroup: Dict[str, str],
insideGroup: Dict[str, str] = None,
filters: List[str] = None
):
"""
Return a line for each bar of a waterfall chart, totals, groups, subgroups.
Compute the variation and variation rate for each line.
---
### Parameters
*mandatory :*
- `date` (*str*): name of the column that identifies the period of each line
- `value` (*str*): name of the column that contains the value for each line
- `start` (*dict*):
- `label`: text displayed under the first master column
- `id`: value in the date column that identifies lines for the first period
- `end` (*dict*):
- `label`: text displayed under the last master column
- `id`: value in the date column that identifies lines for the second period
*optional :*
- `upperGroup` (*dict*):
- `id`: name of the column that contains upperGroups unique IDs
- `label`: not required, text displayed under each upperGroups bars,
using ID when it's absent
- `groupsOrder`: not required, order of upperGroups
- `insideGroup` (*dict*):
- `id`: name of the column that contains insideGroups unique IDs
- `label`: not required, text displayed under each insideGroups bars,
using ID when it's absent
- `groupsOrder`: not required, order of insideGroups
- `filters` (*list*): columns to filter on
---
### Example
**Input**
| product_id | played | date | ord | category_id | category_name |
|:------------:|:--------:|:------:|:-----:|:-------------:|:---------------:|
| super clap | 12 | t1 | 1 | clap | Clap |
| clap clap | 1 | t1 | 10 | clap | Clap |
| tac | 1 | t1 | 1 | snare | Snare |
| super clap | 10 | t2 | 1 | clap | Clap |
| tac | 100 | t2 | 1 | snare | Snare |
| bom | 1 | t2 | 1 | tom | Tom |
```cson
waterfall:
upperGroup:
id: 'category_id'
label: 'category_name'
insideGroup:
id: 'product_id'
groupsOrder: 'ord'
date: 'date'
value: 'played'
start:
label: 'Trimestre 1'
id: 't1'
end:
label: 'Trimester 2'
id: 't2'
```
**Output**
| value | label | variation | groups | type | order |
|:-------:|:-----------:|:-----------:|:--------:|:------:|:-------:|
| 14 | Trimestre 1 | NaN | NaN | NaN | NaN |
| -3 | Clap | -0.230769 | clap | parent | NaN |
| -2 | super clap | -0.166667 | clap | child | 1 |
| -1 | clap clap | -1 | clap | child | 10 |
| 99 | Snare | 99 | snare | parent | NaN |
| 99 | tac | 99 | snare | child | 1 |
| 1 | Tom | inf | tom | parent | NaN |
| 1 | bom | inf | tom | child | 1 |
| 111 | Trimester 2 | NaN | NaN | NaN | NaN |
"""
if len(df) == 0:
return df
if filters is not None:
if isinstance(filters, str):
filters = [filters]
def sub_waterfall(df):
wa_df = waterfall(df, date, value, start, end, upperGroup, insideGroup)
for filters_col in filters:
wa_df[filters_col] = df[filters_col].values[0]
return wa_df
# filters df into a list of sub_df
list_of_sub_df = [df[(df[filters].values == i).all(axis=1)]
for i in df[filters].drop_duplicates().values]
return pd.concat([sub_waterfall(df) for df in list_of_sub_df], sort=False)
groups = {
'upperGroup': {
'type': 'parent',
'id': 'upperGroup',
'order': {
'by': ['upperGroup_order', 'groups'],
'ascending': [True, True]
},
'obj': upperGroup
}
}
if insideGroup is not None:
groups['insideGroup'] = {
'type': 'child',
'id': 'insideGroup',
'order': {
'by': ['type', 'insideGroup_order', 'label'],
'ascending': [False, True, True]
},
'obj': insideGroup
}
# prepare the dataframe with standard column names
df = _compute_rename(df, date, value, groups)
agg_conf = {'value': sum}
agg_conf.update({f'{col}_label': 'first' for col in groups.keys()})
agg_conf.update({f'{col}_order': 'first' for col in groups.keys()})
df = df.groupby(list(groups.keys()) + ['date']).agg(agg_conf).reset_index()
df_start, df_end = _compute_start_end(df, start, end)
df = _compute_value_diff(df, start, end, groups)
middle = _compute_upper_group(df)
if insideGroup is not None:
middle = pd.concat([middle, _compute_inside_group(df)])
ret = _compute_order(df_start, df_end, middle, groups)
return ret
|
def waterfall(
df,
date: str,
value: str,
start: Dict[str, str],
end: Dict[str, str],
upperGroup: Dict[str, str],
insideGroup: Dict[str, str] = None,
filters: List[str] = None
):
"""
Return a line for each bar of a waterfall chart: totals, groups, subgroups.
Compute the variation and variation rate for each line.
---
### Parameters
*mandatory :*
- `date` (*str*): name of the column that identifies the period of each line
- `value` (*str*): name of the column that contains the value for each line
- `start` (*dict*):
- `label`: text displayed under the first master column
- `id`: value in the date column that identifies lines for the first period
- `end` (*dict*):
- `label`: text displayed under the last master column
- `id`: value in the date column that identifies lines for the second period
*optional :*
- `upperGroup` (*dict*):
- `id`: name of the column that contains the upperGroups' unique IDs
- `label`: not required, text displayed under each upperGroup bar,
using ID when it's absent
- `groupsOrder`: not required, order of upperGroups
- `insideGroup` (*dict*):
- `id`: name of the column that contains the insideGroups' unique IDs
- `label`: not required, text displayed under each insideGroup bar,
using ID when it's absent
- `groupsOrder`: not required, order of insideGroups
- `filters` (*list*): columns to filter on
---
### Example
**Input**
| product_id | played | date | ord | category_id | category_name |
|:------------:|:--------:|:------:|:-----:|:-------------:|:---------------:|
| super clap | 12 | t1 | 1 | clap | Clap |
| clap clap | 1 | t1 | 10 | clap | Clap |
| tac | 1 | t1 | 1 | snare | Snare |
| super clap | 10 | t2 | 1 | clap | Clap |
| tac | 100 | t2 | 1 | snare | Snare |
| bom | 1 | t2 | 1 | tom | Tom |
```cson
waterfall:
upperGroup:
id: 'category_id'
label: 'category_name'
insideGroup:
id: 'product_id'
groupsOrder: 'ord'
date: 'date'
value: 'played'
start:
label: 'Trimestre 1'
id: 't1'
end:
label: 'Trimester 2'
id: 't2'
```
**Output**
| value | label | variation | groups | type | order |
|:-------:|:-----------:|:-----------:|:--------:|:------:|:-------:|
| 14 | Trimestre 1 | NaN | NaN | NaN | NaN |
| -3 | Clap | -0.230769 | clap | parent | NaN |
| -2 | super clap | -0.166667 | clap | child | 1 |
| -1 | clap clap | -1 | clap | child | 10 |
| 99 | Snare | 99 | snare | parent | NaN |
| 99 | tac | 99 | snare | child | 1 |
| 1 | Tom | inf | tom | parent | NaN |
| 1 | bom | inf | tom | child | 1 |
| 111 | Trimester 2 | NaN | NaN | NaN | NaN |
"""
if len(df) == 0:
return df
if filters is not None:
if isinstance(filters, str):
filters = [filters]
def sub_waterfall(df):
wa_df = waterfall(df, date, value, start, end, upperGroup, insideGroup)
for filters_col in filters:
wa_df[filters_col] = df[filters_col].values[0]
return wa_df
# split df into a list of sub_df, one per unique combination of filter values
list_of_sub_df = [df[(df[filters].values == i).all(axis=1)]
for i in df[filters].drop_duplicates().values]
return pd.concat([sub_waterfall(df) for df in list_of_sub_df], sort=False)
groups = {
'upperGroup': {
'type': 'parent',
'id': 'upperGroup',
'order': {
'by': ['upperGroup_order', 'groups'],
'ascending': [True, True]
},
'obj': upperGroup
}
}
if insideGroup is not None:
groups['insideGroup'] = {
'type': 'child',
'id': 'insideGroup',
'order': {
'by': ['type', 'insideGroup_order', 'label'],
'ascending': [False, True, True]
},
'obj': insideGroup
}
# prepare the dataframe with standard column names
df = _compute_rename(df, date, value, groups)
agg_conf = {'value': sum}
agg_conf.update({f'{col}_label': 'first' for col in groups.keys()})
agg_conf.update({f'{col}_order': 'first' for col in groups.keys()})
df = df.groupby(list(groups.keys()) + ['date']).agg(agg_conf).reset_index()
df_start, df_end = _compute_start_end(df, start, end)
df = _compute_value_diff(df, start, end, groups)
middle = _compute_upper_group(df)
if insideGroup is not None:
middle = pd.concat([middle, _compute_inside_group(df)])
ret = _compute_order(df_start, df_end, middle, groups)
return ret
|
[
"Return",
"a",
"line",
"for",
"each",
"bars",
"of",
"a",
"waterfall",
"chart",
"totals",
"groups",
"subgroups",
".",
"Compute",
"the",
"variation",
"and",
"variation",
"rate",
"for",
"each",
"line",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/waterfall.py#L5-L153
|
[
"def",
"waterfall",
"(",
"df",
",",
"date",
":",
"str",
",",
"value",
":",
"str",
",",
"start",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"end",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"upperGroup",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"insideGroup",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"None",
",",
"filters",
":",
"List",
"[",
"str",
"]",
"=",
"None",
")",
":",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"return",
"df",
"if",
"filters",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"filters",
",",
"str",
")",
":",
"filters",
"=",
"[",
"filters",
"]",
"def",
"sub_waterfall",
"(",
"df",
")",
":",
"wa_df",
"=",
"waterfall",
"(",
"df",
",",
"date",
",",
"value",
",",
"start",
",",
"end",
",",
"upperGroup",
",",
"insideGroup",
")",
"for",
"filters_col",
"in",
"filters",
":",
"wa_df",
"[",
"filters_col",
"]",
"=",
"df",
"[",
"filters_col",
"]",
".",
"values",
"[",
"0",
"]",
"return",
"wa_df",
"# filters df into a list of sub_df",
"list_of_sub_df",
"=",
"[",
"df",
"[",
"(",
"df",
"[",
"filters",
"]",
".",
"values",
"==",
"i",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
"]",
"for",
"i",
"in",
"df",
"[",
"filters",
"]",
".",
"drop_duplicates",
"(",
")",
".",
"values",
"]",
"return",
"pd",
".",
"concat",
"(",
"[",
"sub_waterfall",
"(",
"df",
")",
"for",
"df",
"in",
"list_of_sub_df",
"]",
",",
"sort",
"=",
"False",
")",
"groups",
"=",
"{",
"'upperGroup'",
":",
"{",
"'type'",
":",
"'parent'",
",",
"'id'",
":",
"'upperGroup'",
",",
"'order'",
":",
"{",
"'by'",
":",
"[",
"'upperGroup_order'",
",",
"'groups'",
"]",
",",
"'ascending'",
":",
"[",
"True",
",",
"True",
"]",
"}",
",",
"'obj'",
":",
"upperGroup",
"}",
"}",
"if",
"insideGroup",
"is",
"not",
"None",
":",
"groups",
"[",
"'insideGroup'",
"]",
"=",
"{",
"'type'",
":",
"'child'",
",",
"'id'",
":",
"'insideGroup'",
",",
"'order'",
":",
"{",
"'by'",
":",
"[",
"'type'",
",",
"'insideGroup_order'",
",",
"'label'",
"]",
",",
"'ascending'",
":",
"[",
"False",
",",
"True",
",",
"True",
"]",
"}",
",",
"'obj'",
":",
"insideGroup",
"}",
"# prepare the dataframe with standard column names",
"df",
"=",
"_compute_rename",
"(",
"df",
",",
"date",
",",
"value",
",",
"groups",
")",
"agg_conf",
"=",
"{",
"'value'",
":",
"sum",
"}",
"agg_conf",
".",
"update",
"(",
"{",
"f'{col}_label'",
":",
"'first'",
"for",
"col",
"in",
"groups",
".",
"keys",
"(",
")",
"}",
")",
"agg_conf",
".",
"update",
"(",
"{",
"f'{col}_order'",
":",
"'first'",
"for",
"col",
"in",
"groups",
".",
"keys",
"(",
")",
"}",
")",
"df",
"=",
"df",
".",
"groupby",
"(",
"list",
"(",
"groups",
".",
"keys",
"(",
")",
")",
"+",
"[",
"'date'",
"]",
")",
".",
"agg",
"(",
"agg_conf",
")",
".",
"reset_index",
"(",
")",
"df_start",
",",
"df_end",
"=",
"_compute_start_end",
"(",
"df",
",",
"start",
",",
"end",
")",
"df",
"=",
"_compute_value_diff",
"(",
"df",
",",
"start",
",",
"end",
",",
"groups",
")",
"middle",
"=",
"_compute_upper_group",
"(",
"df",
")",
"if",
"insideGroup",
"is",
"not",
"None",
":",
"middle",
"=",
"pd",
".",
"concat",
"(",
"[",
"middle",
",",
"_compute_inside_group",
"(",
"df",
")",
"]",
")",
"ret",
"=",
"_compute_order",
"(",
"df_start",
",",
"df_end",
",",
"middle",
",",
"groups",
")",
"return",
"ret"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
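Editor's note — a minimal usage sketch for `waterfall`, reproducing the docstring example above. The import path follows the module path given in this entry; pandas is assumed available.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.waterfall import waterfall

# Input table from the docstring example
df = pd.DataFrame({
    'product_id': ['super clap', 'clap clap', 'tac', 'super clap', 'tac', 'bom'],
    'played': [12, 1, 1, 10, 100, 1],
    'date': ['t1', 't1', 't1', 't2', 't2', 't2'],
    'ord': [1, 10, 1, 1, 1, 1],
    'category_id': ['clap', 'clap', 'snare', 'clap', 'snare', 'tom'],
    'category_name': ['Clap', 'Clap', 'Snare', 'Clap', 'Snare', 'Tom'],
})

result = waterfall(
    df,
    date='date',
    value='played',
    start={'label': 'Trimestre 1', 'id': 't1'},
    end={'label': 'Trimester 2', 'id': 't2'},
    upperGroup={'id': 'category_id', 'label': 'category_name'},
    insideGroup={'id': 'product_id', 'groupsOrder': 'ord'},
)
print(result)  # expected to match the output table in the docstring
```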
test
|
_compute_start_end
|
Compute two dataframes holding the total value for the start and end periods
Args:
df (DataFrame):
Returns: DataFrame, DataFrame
|
toucan_data_sdk/utils/postprocess/waterfall.py
|
def _compute_start_end(df, start, end):
"""
Compute two dataframes holding the total value for the start and end periods
Args:
df (DataFrame):
Returns: DataFrame, DataFrame
"""
result = {}
time_dict = {'start': start, 'end': end}
totals = df.groupby('date').agg({'value': sum}).reset_index()
for time_name, time in time_dict.items():
if not totals[totals['date'] == time['id']].empty:
value = totals.loc[
totals['date'] == time['id'], 'value'
].values[0]
else:
value = 0
result[time_name] = pd.DataFrame([{
'value': value,
'label': time['label'],
'groups': time['label']
}])
return result['start'], result['end']
|
def _compute_start_end(df, start, end):
"""
Compute two dataframes holding the total value for the start and end periods
Args:
df (DataFrame):
Returns: DataFrame, DataFrame
"""
result = {}
time_dict = {'start': start, 'end': end}
totals = df.groupby('date').agg({'value': sum}).reset_index()
for time_name, time in time_dict.items():
if not totals[totals['date'] == time['id']].empty:
value = totals.loc[
totals['date'] == time['id'], 'value'
].values[0]
else:
value = 0
result[time_name] = pd.DataFrame([{
'value': value,
'label': time['label'],
'groups': time['label']
}])
return result['start'], result['end']
|
[
"Compute",
"two",
"dataframes",
"with",
"value",
"for",
"start",
"and",
"end",
"Args",
":",
"totals",
"(",
"dataframe",
")",
":"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/waterfall.py#L174-L198
|
[
"def",
"_compute_start_end",
"(",
"df",
",",
"start",
",",
"end",
")",
":",
"result",
"=",
"{",
"}",
"time_dict",
"=",
"{",
"'start'",
":",
"start",
",",
"'end'",
":",
"end",
"}",
"totals",
"=",
"df",
".",
"groupby",
"(",
"'date'",
")",
".",
"agg",
"(",
"{",
"'value'",
":",
"sum",
"}",
")",
".",
"reset_index",
"(",
")",
"for",
"time_name",
",",
"time",
"in",
"time_dict",
".",
"items",
"(",
")",
":",
"if",
"not",
"totals",
"[",
"totals",
"[",
"'date'",
"]",
"==",
"time",
"[",
"'id'",
"]",
"]",
".",
"empty",
":",
"value",
"=",
"totals",
".",
"loc",
"[",
"totals",
"[",
"'date'",
"]",
"==",
"time",
"[",
"'id'",
"]",
",",
"'value'",
"]",
".",
"values",
"[",
"0",
"]",
"else",
":",
"value",
"=",
"0",
"result",
"[",
"time_name",
"]",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"{",
"'value'",
":",
"value",
",",
"'label'",
":",
"time",
"[",
"'label'",
"]",
",",
"'groups'",
":",
"time",
"[",
"'label'",
"]",
"}",
"]",
")",
"return",
"result",
"[",
"'start'",
"]",
",",
"result",
"[",
"'end'",
"]"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
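Editor's note — an illustrative sketch of `_compute_start_end`, assuming it is imported from the module above and that the frame already uses the internal `date`/`value` column names produced upstream by `_compute_rename`.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.waterfall import _compute_start_end

df = pd.DataFrame({'date': ['t1', 't1', 't1', 't2', 't2', 't2'],
                   'value': [12, 1, 1, 10, 100, 1]})
df_start, df_end = _compute_start_end(
    df,
    start={'label': 'Trimestre 1', 'id': 't1'},
    end={'label': 'Trimester 2', 'id': 't2'},
)
# df_start: one row with value=14,  label='Trimestre 1', groups='Trimestre 1'
# df_end:   one row with value=111, label='Trimester 2', groups='Trimester 2'
```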
test
|
_compute_value_diff
|
Compute the value difference between the start and end periods
Args:
df (DataFrame):
Returns: DataFrame
|
toucan_data_sdk/utils/postprocess/waterfall.py
|
def _compute_value_diff(df, start, end, groups):
"""
Compute the value difference between the start and end periods
Args:
df (DataFrame):
Returns: DataFrame
"""
start_values = df[df['date'] == start['id']].copy()
end_values = df[df['date'] == end['id']].copy()
merge_on = []
for key, group in groups.items():
merge_on = merge_on + [key, f'{key}_label', f'{key}_order']
df = start_values.merge(end_values,
on=merge_on,
how='outer',
suffixes=('_start', '_end'), )
# necessary before calculating variation
df[['value_start', 'value_end']] = df[['value_start', 'value_end']].fillna(0)
df['value'] = df['value_end'] - df['value_start']
df.drop(['date_start', 'date_end', 'value_end'], axis=1, inplace=True)
df.rename(columns={'upperGroup': 'groups'}, inplace=True)
return df
|
def _compute_value_diff(df, start, end, groups):
"""
Compute the value difference between the start and end periods
Args:
df (DataFrame):
Returns: DataFrame
"""
start_values = df[df['date'] == start['id']].copy()
end_values = df[df['date'] == end['id']].copy()
merge_on = []
for key, group in groups.items():
merge_on = merge_on + [key, f'{key}_label', f'{key}_order']
df = start_values.merge(end_values,
on=merge_on,
how='outer',
suffixes=('_start', '_end'), )
# necessary before calculating variation
df[['value_start', 'value_end']] = df[['value_start', 'value_end']].fillna(0)
df['value'] = df['value_end'] - df['value_start']
df.drop(['date_start', 'date_end', 'value_end'], axis=1, inplace=True)
df.rename(columns={'upperGroup': 'groups'}, inplace=True)
return df
|
[
"Compute",
"diff",
"value",
"between",
"start",
"and",
"end",
"Args",
":",
"df",
"(",
"dataframe",
")",
":"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/waterfall.py#L201-L227
|
[
"def",
"_compute_value_diff",
"(",
"df",
",",
"start",
",",
"end",
",",
"groups",
")",
":",
"start_values",
"=",
"df",
"[",
"df",
"[",
"'date'",
"]",
"==",
"start",
"[",
"'id'",
"]",
"]",
".",
"copy",
"(",
")",
"end_values",
"=",
"df",
"[",
"df",
"[",
"'date'",
"]",
"==",
"end",
"[",
"'id'",
"]",
"]",
".",
"copy",
"(",
")",
"merge_on",
"=",
"[",
"]",
"for",
"key",
",",
"group",
"in",
"groups",
".",
"items",
"(",
")",
":",
"merge_on",
"=",
"merge_on",
"+",
"[",
"key",
",",
"f'{key}_label'",
",",
"f'{key}_order'",
"]",
"df",
"=",
"start_values",
".",
"merge",
"(",
"end_values",
",",
"on",
"=",
"merge_on",
",",
"how",
"=",
"'outer'",
",",
"suffixes",
"=",
"(",
"'_start'",
",",
"'_end'",
")",
",",
")",
"# necessary before calculating variation",
"df",
"[",
"[",
"'value_start'",
",",
"'value_end'",
"]",
"]",
"=",
"df",
"[",
"[",
"'value_start'",
",",
"'value_end'",
"]",
"]",
".",
"fillna",
"(",
"0",
")",
"df",
"[",
"'value'",
"]",
"=",
"df",
"[",
"'value_end'",
"]",
"-",
"df",
"[",
"'value_start'",
"]",
"df",
".",
"drop",
"(",
"[",
"'date_start'",
",",
"'date_end'",
",",
"'value_end'",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"df",
".",
"rename",
"(",
"columns",
"=",
"{",
"'upperGroup'",
":",
"'groups'",
"}",
",",
"inplace",
"=",
"True",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
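Editor's note — a sketch of `_compute_value_diff` on a hand-built frame shaped like the output of the groupby step in `waterfall`. Only the keys of the `groups` dict matter here, since the function just builds its merge keys from them.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.waterfall import _compute_value_diff

df = pd.DataFrame({
    'upperGroup': ['clap', 'clap'],
    'upperGroup_label': ['Clap', 'Clap'],
    'upperGroup_order': [1, 1],
    'date': ['t1', 't2'],
    'value': [13, 10],
})
out = _compute_value_diff(df, start={'id': 't1'}, end={'id': 't2'},
                          groups={'upperGroup': {}})
# one row: groups='clap', value=-3 (10 - 13), value_start=13
```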
test
|
_compute_inside_group
|
Compute the inside group (child) lines
Args:
df (DataFrame):
Returns: DataFrame
|
toucan_data_sdk/utils/postprocess/waterfall.py
|
def _compute_inside_group(df):
"""
Compute the inside group (child) lines
Args:
df (DataFrame):
Returns: DataFrame
"""
inside_group = df.copy()
inside_group['type'] = 'child'
inside_group['variation'] = inside_group['value'] / inside_group[
'value_start']
inside_group.drop(['upperGroup_label', 'insideGroup', 'value_start'],
axis=1, inplace=True)
inside_group.rename(columns={'insideGroup_label': 'label'},
inplace=True)
return inside_group
|
def _compute_inside_group(df):
"""
Compute the inside group (child) lines
Args:
df (DataFrame):
Returns: DataFrame
"""
inside_group = df.copy()
inside_group['type'] = 'child'
inside_group['variation'] = inside_group['value'] / inside_group[
'value_start']
inside_group.drop(['upperGroup_label', 'insideGroup', 'value_start'],
axis=1, inplace=True)
inside_group.rename(columns={'insideGroup_label': 'label'},
inplace=True)
return inside_group
|
[
"Compute",
"inside",
"Group",
"Args",
":",
"df",
"(",
"dataframe",
")",
":"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/waterfall.py#L230-L247
|
[
"def",
"_compute_inside_group",
"(",
"df",
")",
":",
"inside_group",
"=",
"df",
".",
"copy",
"(",
")",
"inside_group",
"[",
"'type'",
"]",
"=",
"'child'",
"inside_group",
"[",
"'variation'",
"]",
"=",
"inside_group",
"[",
"'value'",
"]",
"/",
"inside_group",
"[",
"'value_start'",
"]",
"inside_group",
".",
"drop",
"(",
"[",
"'upperGroup_label'",
",",
"'insideGroup'",
",",
"'value_start'",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"inside_group",
".",
"rename",
"(",
"columns",
"=",
"{",
"'insideGroup_label'",
":",
"'label'",
"}",
",",
"inplace",
"=",
"True",
")",
"return",
"inside_group"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
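Editor's note — a sketch of `_compute_inside_group` on a frame shaped like the output of `_compute_value_diff` (hence the renamed `groups` column and the kept `value_start`).
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.waterfall import _compute_inside_group

df = pd.DataFrame({
    'groups': ['clap'],                  # 'upperGroup' renamed upstream
    'upperGroup_label': ['Clap'],
    'upperGroup_order': [1],
    'insideGroup': ['super clap'],
    'insideGroup_label': ['super clap'],
    'insideGroup_order': [1],
    'value': [-2],
    'value_start': [12],
})
child = _compute_inside_group(df)
# type='child', variation=-2/12 ~ -0.1667, 'insideGroup_label' renamed to 'label'
```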
test
|
_compute_upper_group
|
Compute the upper group (parent) lines
Args:
df (DataFrame):
Returns: DataFrame
|
toucan_data_sdk/utils/postprocess/waterfall.py
|
def _compute_upper_group(df):
"""
Compute the upper group (parent) lines
Args:
df (DataFrame):
Returns: DataFrame
"""
upper_group = df.groupby(['groups']).agg({
'value': sum,
'value_start': sum,
'upperGroup_label': 'first',
'upperGroup_order': 'first'
}).reset_index()
upper_group['type'] = 'parent'
upper_group['variation'] = upper_group['value'] / upper_group[
'value_start']
upper_group.drop(['value_start'], axis=1, inplace=True)
upper_group.rename(columns={'upperGroup_label': 'label'}, inplace=True)
return upper_group
|
def _compute_upper_group(df):
"""
Compute the upper group (parent) lines
Args:
df (DataFrame):
Returns: DataFrame
"""
upper_group = df.groupby(['groups']).agg({
'value': sum,
'value_start': sum,
'upperGroup_label': 'first',
'upperGroup_order': 'first'
}).reset_index()
upper_group['type'] = 'parent'
upper_group['variation'] = upper_group['value'] / upper_group[
'value_start']
upper_group.drop(['value_start'], axis=1, inplace=True)
upper_group.rename(columns={'upperGroup_label': 'label'}, inplace=True)
return upper_group
|
[
"Compute",
"upperGroup",
"Args",
":",
"df",
"(",
"Dataframe",
")",
":"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/waterfall.py#L250-L270
|
[
"def",
"_compute_upper_group",
"(",
"df",
")",
":",
"upper_group",
"=",
"df",
".",
"groupby",
"(",
"[",
"'groups'",
"]",
")",
".",
"agg",
"(",
"{",
"'value'",
":",
"sum",
",",
"'value_start'",
":",
"sum",
",",
"'upperGroup_label'",
":",
"'first'",
",",
"'upperGroup_order'",
":",
"'first'",
"}",
")",
".",
"reset_index",
"(",
")",
"upper_group",
"[",
"'type'",
"]",
"=",
"'parent'",
"upper_group",
"[",
"'variation'",
"]",
"=",
"upper_group",
"[",
"'value'",
"]",
"/",
"upper_group",
"[",
"'value_start'",
"]",
"upper_group",
".",
"drop",
"(",
"[",
"'value_start'",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"upper_group",
".",
"rename",
"(",
"columns",
"=",
"{",
"'upperGroup_label'",
":",
"'label'",
"}",
",",
"inplace",
"=",
"True",
")",
"return",
"upper_group"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
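Editor's note — a sketch of `_compute_upper_group` aggregating child lines into one parent bar per group; same assumed input shape as in the previous sketch.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.waterfall import _compute_upper_group

df = pd.DataFrame({
    'groups': ['clap', 'clap'],
    'upperGroup_label': ['Clap', 'Clap'],
    'upperGroup_order': [1, 1],
    'value': [-2, -1],
    'value_start': [12, 1],
})
parent = _compute_upper_group(df)
# one 'parent' row: value=-3, variation=-3/13 ~ -0.230769, label='Clap'
```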
test
|
_basic_math_operation
|
Basic mathematical operation to apply operator on `column_1` and `column_2`
Both can be either a number or the name of a column of `df`
Will create a new column named `new_column`
|
toucan_data_sdk/utils/postprocess/math.py
|
def _basic_math_operation(df, new_column, column_1, column_2, op):
"""
Basic mathematical operation to apply operator on `column_1` and `column_2`
Both can be either a number or the name of a column of `df`
Will create a new column named `new_column`
"""
if not isinstance(column_1, (str, int, float)):
raise TypeError(f'column_1 must be a string, an integer or a float')
if not isinstance(column_2, (str, int, float)):
raise TypeError(f'column_2 must be a string, an integer or a float')
if isinstance(column_1, str):
column_1 = df[column_1]
if isinstance(column_2, str):
column_2 = df[column_2]
operator = getattr(_operator, op)
df[new_column] = operator(column_1, column_2)
return df
|
def _basic_math_operation(df, new_column, column_1, column_2, op):
"""
Basic mathematical operation to apply operator on `column_1` and `column_2`
Both can be either a number or the name of a column of `df`
Will create a new column named `new_column`
"""
if not isinstance(column_1, (str, int, float)):
raise TypeError(f'column_1 must be a string, an integer or a float')
if not isinstance(column_2, (str, int, float)):
raise TypeError(f'column_2 must be a string, an integer or a float')
if isinstance(column_1, str):
column_1 = df[column_1]
if isinstance(column_2, str):
column_2 = df[column_2]
operator = getattr(_operator, op)
df[new_column] = operator(column_1, column_2)
return df
|
[
"Basic",
"mathematical",
"operation",
"to",
"apply",
"operator",
"on",
"column_1",
"and",
"column_2",
"Both",
"can",
"be",
"either",
"a",
"number",
"or",
"the",
"name",
"of",
"a",
"column",
"of",
"df",
"Will",
"create",
"a",
"new",
"column",
"named",
"new_column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L7-L24
|
[
"def",
"_basic_math_operation",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
",",
"op",
")",
":",
"if",
"not",
"isinstance",
"(",
"column_1",
",",
"(",
"str",
",",
"int",
",",
"float",
")",
")",
":",
"raise",
"TypeError",
"(",
"f'column_1 must be a string, an integer or a float'",
")",
"if",
"not",
"isinstance",
"(",
"column_2",
",",
"(",
"str",
",",
"int",
",",
"float",
")",
")",
":",
"raise",
"TypeError",
"(",
"f'column_2 must be a string, an integer or a float'",
")",
"if",
"isinstance",
"(",
"column_1",
",",
"str",
")",
":",
"column_1",
"=",
"df",
"[",
"column_1",
"]",
"if",
"isinstance",
"(",
"column_2",
",",
"str",
")",
":",
"column_2",
"=",
"df",
"[",
"column_2",
"]",
"operator",
"=",
"getattr",
"(",
"_operator",
",",
"op",
")",
"df",
"[",
"new_column",
"]",
"=",
"operator",
"(",
"column_1",
",",
"column_2",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
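Editor's note — a short sketch of `_basic_math_operation`; `op` must be the name of a function in Python's `operator` module (imported as `_operator` in this file), e.g. 'add', 'sub', 'mul', 'truediv'.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.math import _basic_math_operation

df = pd.DataFrame({'a': [1, 2], 'b': [10, 20]})
df = _basic_math_operation(df, 'total', 'a', 'b', op='add')   # column + column -> [11, 22]
df = _basic_math_operation(df, 'plus5', 'a', 5, op='add')     # column + constant -> [6, 7]
```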
test
|
add
|
DEPRECATED - use `formula` instead
|
toucan_data_sdk/utils/postprocess/math.py
|
def add(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='add')
|
def add(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='add')
|
[
"DEPRECATED",
"-",
"use",
"formula",
"instead"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L27-L31
|
[
"def",
"add",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
")",
":",
"return",
"_basic_math_operation",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
",",
"op",
"=",
"'add'",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
subtract
|
DEPRECATED - use `formula` instead
|
toucan_data_sdk/utils/postprocess/math.py
|
def subtract(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='sub')
|
def subtract(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='sub')
|
[
"DEPRECATED",
"-",
"use",
"formula",
"instead"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L34-L38
|
[
"def",
"subtract",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
")",
":",
"return",
"_basic_math_operation",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
",",
"op",
"=",
"'sub'",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
multiply
|
DEPRECATED - use `formula` instead
|
toucan_data_sdk/utils/postprocess/math.py
|
def multiply(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='mul')
|
def multiply(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='mul')
|
[
"DEPRECATED",
"-",
"use",
"formula",
"instead"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L41-L45
|
[
"def",
"multiply",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
")",
":",
"return",
"_basic_math_operation",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
",",
"op",
"=",
"'mul'",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
divide
|
DEPRECATED - use `formula` instead
|
toucan_data_sdk/utils/postprocess/math.py
|
def divide(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='truediv')
|
def divide(df, new_column, column_1, column_2):
"""
DEPRECATED - use `formula` instead
"""
return _basic_math_operation(df, new_column, column_1, column_2, op='truediv')
|
[
"DEPRECATED",
"-",
"use",
"formula",
"instead"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L48-L52
|
[
"def",
"divide",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
")",
":",
"return",
"_basic_math_operation",
"(",
"df",
",",
"new_column",
",",
"column_1",
",",
"column_2",
",",
"op",
"=",
"'truediv'",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
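Editor's note — the four deprecated wrappers above (`add`, `subtract`, `multiply`, `divide`) differ only in the `op` they forward. A sketch of the equivalent calls to `formula` (documented below), assuming these simple expressions parse as in the docstring examples:
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.math import formula

df = pd.DataFrame({'a': [1.0, 2.0], 'b': [4.0, 8.0]})
df = formula(df, new_column='total', formula='a + b')   # replaces add
df = formula(df, new_column='diff', formula='a - b')    # replaces subtract
df = formula(df, new_column='prod', formula='a * b')    # replaces multiply
df = formula(df, new_column='ratio', formula='a / b')   # replaces divide
```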
test
|
formula
|
Do mathematical operations on columns (add, subtract, multiply or divide)
---
### Parameters
*mandatory:*
- `new_column` (*str*): name of the output column
- `formula` (*str*): operation on columns. Use column names and the operators:
- `+` for addition
- `-` for subtraction
- `*` for multiplication
- `/` for division
**Note:**
- your column name can contain spaces.
- if your column name is a number, you must use a quote mark : `"` or `'` (cf. example)
---
### Examples
**Input**
| variable | valueA | valueB | My rate |
|:--------:|:--------:|:-----:|:------:|
| toto | 20 | 100 | 10 |
| toto | 30 | 200 | 10 |
| toto | 10 | 300 | 10 |
```cson
formula:
new_column: 'valueD'
formula: '(valueB + valueA ) / My rate'
```
**Output**
| variable | valueA | valueB | My rate | valueD |
|:--------:|:--------:|:------:|:-------:|:-------:|
| toto | 20 | 100 | 10 | 12 |
| toto | 30 | 200 | 10 | 23 |
| toto | 10 | 300 | 10 | 31 |
---
**Input**
| variable | 2018 | 2019 |
|:--------:|:--------:|:-----:|
| toto | 20 | 100 |
| toto | 30 | 200 |
| toto | 10 | 300 |
```cson
formula:
new_column: 'Evolution'
formula: "'2019' - '2018'"
```
**Output**
| variable | 2018 | 2019 | Evolution |
|:--------:|:--------:|:-----:|:-----:|
| toto | 20 | 100 | 80 |
| toto | 30 | 200 | 170 |
| toto | 10 | 300 | 290 |
|
toucan_data_sdk/utils/postprocess/math.py
|
def formula(df, *, new_column: str, formula: str):
"""
Do mathematical operations on columns (add, subtract, multiply or divide)
---
### Parameters
*mandatory:*
- `new_column` (*str*): name of the output column
- `formula` (*str*): operation on columns. Use column names and the operators:
- `+` for addition
- `-` for subtraction
- `*` for multiplication
- `/` for division
**Note:**
- your column name can contain spaces.
- if your column name is a number, you must use a quote mark : `"` or `'` (cf. example)
---
### Examples
**Input**
| variable | valueA | valueB | My rate |
|:--------:|:--------:|:-----:|:------:|
| toto | 20 | 100 | 10 |
| toto | 30 | 200 | 10 |
| toto | 10 | 300 | 10 |
```cson
formula:
new_column: 'valueD'
formula: '(valueB + valueA ) / My rate'
```
**Output**
| variable | valueA | valueB | My rate | valueD |
|:--------:|:--------:|:------:|:-------:|:-------:|
| toto | 20 | 100 | 10 | 12 |
| toto | 30 | 200 | 10 | 23 |
| toto | 10 | 300 | 10 | 31 |
---
**Input**
| variable | 2018 | 2019 |
|:--------:|:--------:|:-----:|
| toto | 20 | 100 |
| toto | 30 | 200 |
| toto | 10 | 300 |
```cson
formula:
new_column: 'Evolution'
formula: "'2019' - '2018'"
```
**Output**
| variable | 2018 | 2019 | Evolution |
|:--------:|:--------:|:-----:|:-----:|
| toto | 20 | 100 | 80 |
| toto | 30 | 200 | 170 |
| toto | 10 | 300 | 290 |
"""
tokens = _parse_formula(formula)
expression_splitted = []
for t in tokens:
# To use a column name with only digits, it has to be quoted!
# Otherwise it is considered as a regular number
if not t.quoted and (t in MATH_CHARACTERS or is_float(t)):
expression_splitted.append(t)
elif t in df.columns:
expression_splitted.append(f'df["{t}"]')
else:
raise FormulaError(f'"{t}" is not a valid column name')
expression = ''.join(expression_splitted)
df[new_column] = eval(expression)
return df
|
def formula(df, *, new_column: str, formula: str):
"""
Do mathematical operations on columns (add, subtract, multiply or divide)
---
### Parameters
*mandatory:*
- `new_column` (*str*): name of the output column
- `formula` (*str*): operation on columns. Use column names and the operators:
- `+` for addition
- `-` for subtraction
- `*` for multiplication
- `/` for division
**Note:**
- your column name can contain spaces.
- if your column name is a number, you must use a quote mark : `"` or `'` (cf. example)
---
### Examples
**Input**
| variable | valueA | valueB | My rate |
|:--------:|:--------:|:-----:|:------:|
| toto | 20 | 100 | 10 |
| toto | 30 | 200 | 10 |
| toto | 10 | 300 | 10 |
```cson
formula:
new_column: 'valueD'
formula: '(valueB + valueA ) / My rate'
```
**Output**
| variable | valueA | valueB | My rate | valueD |
|:--------:|:--------:|:------:|:-------:|:-------:|
| toto | 20 | 100 | 10 | 12 |
| toto | 30 | 200 | 10 | 23 |
| toto | 10 | 300 | 10 | 31 |
---
**Input**
| variable | 2018 | 2019 |
|:--------:|:--------:|:-----:|
| toto | 20 | 100 |
| toto | 30 | 200 |
| toto | 10 | 300 |
```cson
formula:
new_column: 'Evolution'
formula: "'2019' - '2018'"
```
**Output**
| variable | 2018 | 2019 | Evolution |
|:--------:|:--------:|:-----:|:-----:|
| toto | 20 | 100 | 80 |
| toto | 30 | 200 | 170 |
| toto | 10 | 300 | 290 |
"""
tokens = _parse_formula(formula)
expression_splitted = []
for t in tokens:
# To use a column name with only digits, it has to be quoted!
# Otherwise it is considered as a regular number
if not t.quoted and (t in MATH_CHARACTERS or is_float(t)):
expression_splitted.append(t)
elif t in df.columns:
expression_splitted.append(f'df["{t}"]')
else:
raise FormulaError(f'"{t}" is not a valid column name')
expression = ''.join(expression_splitted)
df[new_column] = eval(expression)
return df
|
[
"Do",
"mathematic",
"operations",
"on",
"columns",
"(",
"add",
"subtract",
"multiply",
"or",
"divide",
")"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L103-L188
|
[
"def",
"formula",
"(",
"df",
",",
"*",
",",
"new_column",
":",
"str",
",",
"formula",
":",
"str",
")",
":",
"tokens",
"=",
"_parse_formula",
"(",
"formula",
")",
"expression_splitted",
"=",
"[",
"]",
"for",
"t",
"in",
"tokens",
":",
"# To use a column name with only digits, it has to be quoted!",
"# Otherwise it is considered as a regular number",
"if",
"not",
"t",
".",
"quoted",
"and",
"(",
"t",
"in",
"MATH_CHARACTERS",
"or",
"is_float",
"(",
"t",
")",
")",
":",
"expression_splitted",
".",
"append",
"(",
"t",
")",
"elif",
"t",
"in",
"df",
".",
"columns",
":",
"expression_splitted",
".",
"append",
"(",
"f'df[\"{t}\"]'",
")",
"else",
":",
"raise",
"FormulaError",
"(",
"f'\"{t}\" is not a valid column name'",
")",
"expression",
"=",
"''",
".",
"join",
"(",
"expression_splitted",
")",
"df",
"[",
"new_column",
"]",
"=",
"eval",
"(",
"expression",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
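Editor's note — a minimal usage sketch for `formula`, reproducing the first docstring example (note the space-containing column name).
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.math import formula

df = pd.DataFrame({'valueA': [20, 30, 10],
                   'valueB': [100, 200, 300],
                   'My rate': [10, 10, 10]})
df = formula(df, new_column='valueD', formula='(valueB + valueA ) / My rate')
# df['valueD'] -> [12.0, 23.0, 31.0]
```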
test
|
round_values
|
Round each value of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to round
- `decimals` (*int*): number of decimals to keep
*optional :*
- `new_column` (*str*): name of the new column to create.
By default, no new column will be created and `column` will be replaced
---
### Example
**Input**
ENTITY|VALUE_1|VALUE_2
:-----:|:-----:|:-----:
A|-1.512|-1.504
A|0.432|0.14
```cson
round_values:
column: 'VALUE_1'
decimals: 1
new_column: 'Pika'
```
**Output**
ENTITY|VALUE_1|VALUE_2|Pika
:-----:|:-----:|:-----:|:-----:
A|-1.512|-1.504|-1.5
A|0.432|0.14|0.4
|
toucan_data_sdk/utils/postprocess/math.py
|
def round_values(df, *, column: str, decimals: int, new_column: str = None):
"""
Round each value of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to round
- `decimals` (*int*): number of decimals to keep
*optional :*
- `new_column` (*str*): name of the new column to create.
By default, no new column will be created and `column` will be replaced
---
### Example
**Input**
ENTITY|VALUE_1|VALUE_2
:-----:|:-----:|:-----:
A|-1.512|-1.504
A|0.432|0.14
```cson
round_values:
column: 'VALUE_1'
decimals: 1
new_column: 'Pika'
```
**Output**
ENTITY|VALUE_1|VALUE_2|Pika
:-----:|:-----:|:-----:|:-----:
A|-1.512|-1.504|-1.5
A|0.432|0.14|0.4
"""
new_column = new_column or column
df[new_column] = df[column].round(decimals)
return df
|
def round_values(df, *, column: str, decimals: int, new_column: str = None):
"""
Round each value of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to round
- `decimals` (*int*): number of decimals to keep
*optional :*
- `new_column` (*str*): name of the new column to create.
By default, no new column will be created and `column` will be replaced
---
### Example
**Input**
ENTITY|VALUE_1|VALUE_2
:-----:|:-----:|:-----:
A|-1.512|-1.504
A|0.432|0.14
```cson
round_values:
column: 'VALUE_1'
decimals: 1
new_column: 'Pika'
```
**Output**
ENTITY|VALUE_1|VALUE_2|Pika
:-----:|:-----:|:-----:|:-----:
A|-1.512|-1.504|-1.5
A|0.432|0.14|0.4
"""
new_column = new_column or column
df[new_column] = df[column].round(decimals)
return df
|
[
"Round",
"each",
"value",
"of",
"a",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L195-L238
|
[
"def",
"round_values",
"(",
"df",
",",
"*",
",",
"column",
":",
"str",
",",
"decimals",
":",
"int",
",",
"new_column",
":",
"str",
"=",
"None",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
"[",
"new_column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"round",
"(",
"decimals",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
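Editor's note — a minimal usage sketch for `round_values` (keyword-only arguments, per the signature above).
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.math import round_values

df = pd.DataFrame({'ENTITY': ['A', 'A'], 'VALUE_1': [-1.512, 0.432]})
df = round_values(df, column='VALUE_1', decimals=1, new_column='Pika')
# df['Pika'] -> [-1.5, 0.4]; VALUE_1 is left untouched because new_column is given
```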
test
|
absolute_values
|
Get the absolute numeric value of each element of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column
*optional :*
- `new_column` (*str*): name of the column containing the result.
By default, no new column will be created and `column` will be replaced.
---
### Example
**Input**
| ENTITY | VALUE_1 | VALUE_2 |
|:------:|:-------:|:-------:|
| A | -1.512 | -1.504 |
| A | 0.432 | 0.14 |
```cson
absolute_values:
column: 'VALUE_1'
new_column: 'Pika'
```
**Output**
| ENTITY | VALUE_1 | VALUE_2 | Pika |
|:------:|:-------:|:-------:|:-----:|
| A | -1.512 | -1.504 | 1.512 |
| A | 0.432 | 0.14 | 0.432 |
|
toucan_data_sdk/utils/postprocess/math.py
|
def absolute_values(df, *, column: str, new_column: str = None):
"""
Get the absolute numeric value of each element of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column
*optional :*
- `new_column` (*str*): name of the column containing the result.
By default, no new column will be created and `column` will be replaced.
---
### Example
**Input**
| ENTITY | VALUE_1 | VALUE_2 |
|:------:|:-------:|:-------:|
| A | -1.512 | -1.504 |
| A | 0.432 | 0.14 |
```cson
absolute_values:
column: 'VALUE_1'
new_column: 'Pika'
```
**Output**
| ENTITY | VALUE_1 | VALUE_2 | Pika |
|:------:|:-------:|:-------:|:-----:|
| A | -1.512 | -1.504 | 1.512 |
| A | 0.432 | 0.14 | 0.432 |
"""
new_column = new_column or column
df[new_column] = abs(df[column])
return df
|
def absolute_values(df, *, column: str, new_column: str = None):
"""
Get the absolute numeric value of each element of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column
*optional :*
- `new_column` (*str*): name of the column containing the result.
By default, no new column will be created and `column` will be replaced.
---
### Example
**Input**
| ENTITY | VALUE_1 | VALUE_2 |
|:------:|:-------:|:-------:|
| A | -1.512 | -1.504 |
| A | 0.432 | 0.14 |
```cson
absolute_values:
column: 'VALUE_1'
new_column: 'Pika'
```
**Output**
| ENTITY | VALUE_1 | VALUE_2 | Pika |
|:------:|:-------:|:-------:|:-----:|
| A | -1.512 | -1.504 | 1.512 |
| A | 0.432 | 0.14 | 0.432 |
"""
new_column = new_column or column
df[new_column] = abs(df[column])
return df
|
[
"Get",
"the",
"absolute",
"numeric",
"value",
"of",
"each",
"element",
"of",
"a",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/math.py#L241-L282
|
[
"def",
"absolute_values",
"(",
"df",
",",
"*",
",",
"column",
":",
"str",
",",
"new_column",
":",
"str",
"=",
"None",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
"[",
"new_column",
"]",
"=",
"abs",
"(",
"df",
"[",
"column",
"]",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
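Editor's note — a minimal usage sketch for `absolute_values`.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.math import absolute_values

df = pd.DataFrame({'VALUE_1': [-1.512, 0.432]})
df = absolute_values(df, column='VALUE_1', new_column='Pika')
# df['Pika'] -> [1.512, 0.432]
```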
test
|
pivot
|
Pivot the data. Reverse operation of melting
---
### Parameters
*mandatory :*
- `index` (*list*): names of index columns.
- `column` (*str*): column name to pivot on
- `value` (*str*): column name containing the value to fill the pivoted df
*optional :*
- `agg_function` (*str*): aggregation function to use, among 'mean' (default), 'count', 'max', 'min'
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
pivot:
index: ['variable','wave']
column: 'year'
value: 'value'
```
**Output**
| variable | wave | 2014 | 2015 | 2016 |
|:--------:|:-------:|:------:|:----:|:----:|
| toto | wave 1 | 300 | 250 | 450 |
|
toucan_data_sdk/utils/postprocess/pivot.py
|
def pivot(df, index: List[str], column: str, value: str, agg_function: str = 'mean'):
"""
Pivot the data. Reverse operation of melting
---
### Parameters
*mandatory :*
- `index` (*list*): names of index columns.
- `column` (*str*): column name to pivot on
- `value` (*str*): column name containing the value to fill the pivoted df
*optional :*
- `agg_function` (*str*): aggregation function to use, among 'mean' (default), 'count', 'max', 'min'
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
pivot:
index: ['variable','wave']
column: 'year'
value: 'value'
```
**Output**
| variable | wave | 2014 | 2015 | 2016 |
|:--------:|:-------:|:------:|:----:|:----:|
| toto | wave 1 | 300 | 250 | 450 |
"""
if df.dtypes[value].type == np.object_:
df = pd.pivot_table(df, index=index,
columns=column,
values=value,
aggfunc=lambda x: ' '.join(x))
else:
df = pd.pivot_table(df, index=index,
columns=column,
values=value,
aggfunc=agg_function)
df = df.reset_index()
return df
|
def pivot(df, index: List[str], column: str, value: str, agg_function: str = 'mean'):
"""
Pivot the data. Reverse operation of melting
---
### Parameters
*mandatory :*
- `index` (*list*): names of index columns.
- `column` (*str*): column name to pivot on
- `value` (*str*): column name containing the value to fill the pivoted df
*optional :*
- `agg_function` (*str*): aggregation function to use, among 'mean' (default), 'count', 'max', 'min'
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
pivot:
index: ['variable','wave']
column: 'year'
value: 'value'
```
**Output**
| variable | wave | 2014 | 2015 | 2016 |
|:--------:|:-------:|:------:|:----:|:----:|
| toto | wave 1 | 300 | 250 | 450 |
"""
if df.dtypes[value].type == np.object_:
df = pd.pivot_table(df, index=index,
columns=column,
values=value,
aggfunc=lambda x: ' '.join(x))
else:
df = pd.pivot_table(df, index=index,
columns=column,
values=value,
aggfunc=agg_function)
df = df.reset_index()
return df
|
[
"Pivot",
"the",
"data",
".",
"Reverse",
"operation",
"of",
"melting"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/pivot.py#L6-L58
|
[
"def",
"pivot",
"(",
"df",
",",
"index",
":",
"List",
"[",
"str",
"]",
",",
"column",
":",
"str",
",",
"value",
":",
"str",
",",
"agg_function",
":",
"str",
"=",
"'mean'",
")",
":",
"if",
"df",
".",
"dtypes",
"[",
"value",
"]",
".",
"type",
"==",
"np",
".",
"object_",
":",
"df",
"=",
"pd",
".",
"pivot_table",
"(",
"df",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"column",
",",
"values",
"=",
"value",
",",
"aggfunc",
"=",
"lambda",
"x",
":",
"' '",
".",
"join",
"(",
"x",
")",
")",
"else",
":",
"df",
"=",
"pd",
".",
"pivot_table",
"(",
"df",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"column",
",",
"values",
"=",
"value",
",",
"aggfunc",
"=",
"agg_function",
")",
"df",
"=",
"df",
".",
"reset_index",
"(",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
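Editor's note — a minimal usage sketch for `pivot`, reproducing the docstring example; the numeric branch applies `agg_function` ('mean' by default), while the object-dtype branch joins strings with spaces.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.pivot import pivot

df = pd.DataFrame({
    'variable': ['toto', 'toto', 'toto'],
    'wave': ['wave 1', 'wave 1', 'wave 1'],
    'year': [2014, 2015, 2016],
    'value': [300, 250, 450],
})
df = pivot(df, index=['variable', 'wave'], column='year', value='value')
# one row with columns 2014, 2015, 2016 -> 300, 250, 450
```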
test
|
pivot_by_group
|
Pivot a dataframe by group of variables
---
### Parameters
*mandatory :*
* `variable` (*str*): name of the column used to create the groups.
* `value` (*str*): name of the column containing the value to fill the pivoted df.
* `new_columns` (*list of str*): names of the new columns.
* `groups` (*dict*): names of the groups with their corresponding variables.
**Warning**: the list of variables must have the same order as `new_columns`
*optional :*
* `id_cols` (*list of str*) : names of other columns to keep, default `None`.
---
### Example
**Input**
| type | variable | montant |
|:----:|:----------:|:-------:|
| A | var1 | 5 |
| A | var1_evol | 0.3 |
| A | var2 | 6 |
| A | var2_evol | 0.2 |
```cson
pivot_by_group :
id_cols: ['type']
variable: 'variable'
value: 'montant'
new_columns: ['value', 'variation']
groups:
'Group 1' : ['var1', 'var1_evol']
'Group 2' : ['var2', 'var2_evol']
```
**Output**
| type | variable | value | variation |
|:----:|:----------:|:-------:|:---------:|
| A | Group 1 | 5 | 0.3 |
| A | Group 2 | 6 | 0.2 |
|
toucan_data_sdk/utils/postprocess/pivot.py
|
def pivot_by_group(
df,
variable,
value,
new_columns,
groups,
id_cols=None
):
"""
Pivot a dataframe by group of variables
---
### Parameters
*mandatory :*
* `variable` (*str*): name of the column used to create the groups.
* `value` (*str*): name of the column containing the value to fill the pivoted df.
* `new_columns` (*list of str*): names of the new columns.
* `groups` (*dict*): names of the groups with their corresponding variables.
**Warning**: the list of variables must have the same order as `new_columns`
*optional :*
* `id_cols` (*list of str*) : names of other columns to keep, default `None`.
---
### Example
**Input**
| type | variable | montant |
|:----:|:----------:|:-------:|
| A | var1 | 5 |
| A | var1_evol | 0.3 |
| A | var2 | 6 |
| A | var2_evol | 0.2 |
```cson
pivot_by_group :
id_cols: ['type']
variable: 'variable'
value: 'montant'
new_columns: ['value', 'variation']
groups:
'Group 1' : ['var1', 'var1_evol']
'Group 2' : ['var2', 'var2_evol']
```
**Output**
| type | variable | value | variation |
|:----:|:----------:|:-------:|:---------:|
| A | Group 1 | 5 | 0.3 |
| A | Group 2 | 6 | 0.2 |
"""
if id_cols is None:
index = [variable]
else:
index = [variable] + id_cols
param = pd.DataFrame(groups, index=new_columns)
temporary_colum = 'tmp'
df[temporary_colum] = df[variable]
for column in param.columns:
df.loc[df[variable].isin(param[column]), variable] = column
param = param.T
for column in param.columns:
df.loc[
df[temporary_colum].isin(param[column]), temporary_colum] = column
df = pivot(df, index, temporary_colum, value)
return df
|
def pivot_by_group(
df,
variable,
value,
new_columns,
groups,
id_cols=None
):
"""
Pivot a dataframe by group of variables
---
### Parameters
*mandatory :*
* `variable` (*str*): name of the column used to create the groups.
* `value` (*str*): name of the column containing the value to fill the pivoted df.
* `new_columns` (*list of str*): names of the new columns.
* `groups` (*dict*): names of the groups with their corresponding variables.
**Warning**: the list of variables must have the same order as `new_columns`
*optional :*
* `id_cols` (*list of str*) : names of other columns to keep, default `None`.
---
### Example
**Input**
| type | variable | montant |
|:----:|:----------:|:-------:|
| A | var1 | 5 |
| A | var1_evol | 0.3 |
| A | var2 | 6 |
| A | var2_evol | 0.2 |
```cson
pivot_by_group :
id_cols: ['type']
variable: 'variable'
value: 'montant'
new_columns: ['value', 'variation']
groups:
'Group 1' : ['var1', 'var1_evol']
'Group 2' : ['var2', 'var2_evol']
```
**Output**
| type | variable | value | variation |
|:----:|:----------:|:-------:|:---------:|
| A | Group 1 | 5 | 0.3 |
| A | Group 2 | 6 | 0.2 |
"""
if id_cols is None:
index = [variable]
else:
index = [variable] + id_cols
param = pd.DataFrame(groups, index=new_columns)
temporary_colum = 'tmp'
df[temporary_colum] = df[variable]
for column in param.columns:
df.loc[df[variable].isin(param[column]), variable] = column
param = param.T
for column in param.columns:
df.loc[
df[temporary_colum].isin(param[column]), temporary_colum] = column
df = pivot(df, index, temporary_colum, value)
return df
|
[
"Pivot",
"a",
"dataframe",
"by",
"group",
"of",
"variables"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/pivot.py#L61-L136
|
[
"def",
"pivot_by_group",
"(",
"df",
",",
"variable",
",",
"value",
",",
"new_columns",
",",
"groups",
",",
"id_cols",
"=",
"None",
")",
":",
"if",
"id_cols",
"is",
"None",
":",
"index",
"=",
"[",
"variable",
"]",
"else",
":",
"index",
"=",
"[",
"variable",
"]",
"+",
"id_cols",
"param",
"=",
"pd",
".",
"DataFrame",
"(",
"groups",
",",
"index",
"=",
"new_columns",
")",
"temporary_colum",
"=",
"'tmp'",
"df",
"[",
"temporary_colum",
"]",
"=",
"df",
"[",
"variable",
"]",
"for",
"column",
"in",
"param",
".",
"columns",
":",
"df",
".",
"loc",
"[",
"df",
"[",
"variable",
"]",
".",
"isin",
"(",
"param",
"[",
"column",
"]",
")",
",",
"variable",
"]",
"=",
"column",
"param",
"=",
"param",
".",
"T",
"for",
"column",
"in",
"param",
".",
"columns",
":",
"df",
".",
"loc",
"[",
"df",
"[",
"temporary_colum",
"]",
".",
"isin",
"(",
"param",
"[",
"column",
"]",
")",
",",
"temporary_colum",
"]",
"=",
"column",
"df",
"=",
"pivot",
"(",
"df",
",",
"index",
",",
"temporary_colum",
",",
"value",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
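Editor's note — a minimal usage sketch for `pivot_by_group`, reproducing the docstring example.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.pivot import pivot_by_group

df = pd.DataFrame({
    'type': ['A', 'A', 'A', 'A'],
    'variable': ['var1', 'var1_evol', 'var2', 'var2_evol'],
    'montant': [5, 0.3, 6, 0.2],
})
df = pivot_by_group(
    df,
    variable='variable',
    value='montant',
    new_columns=['value', 'variation'],
    groups={'Group 1': ['var1', 'var1_evol'],
            'Group 2': ['var2', 'var2_evol']},
    id_cols=['type'],
)
# one row per group: ('Group 1', 'A', 5.0, 0.3) and ('Group 2', 'A', 6.0, 0.2)
```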
test
|
groupby
|
Aggregate values by groups.
---
### Parameters
*mandatory :*
- `group_cols` (*list*): list of columns used to group data
- `aggregations` (*dict*): dictionary of value columns to aggregate as keys and aggregation
functions to use as values (See the [list of aggregation functions](
https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation))
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 60 | 6 |
```cson
groupby:
group_cols: ['ENTITY', 'YEAR']
aggregations:
'VALUE_1': 'sum',
'VALUE_2': 'mean'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 30 | 2.0 |
| A | 2018 | 40 | 4.5 |
| B | 2017 | 100 | 3.5 |
| B | 2018 | 110 | 6.5 |
|
toucan_data_sdk/utils/postprocess/groupby.py
|
def groupby(df, *, group_cols: Union[str, List[str]],
aggregations: Dict[str, Union[str, List[str]]]):
"""
Aggregate values by groups.
---
### Parameters
*mandatory :*
- `group_cols` (*list*): list of columns used to group data
- `aggregations` (*dict*): dictionary of value columns to aggregate as keys and aggregation
functions to use as values (See the [list of aggregation functions](
https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation))
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 60 | 6 |
```cson
groupby:
group_cols: ['ENTITY', 'YEAR']
aggregations:
'VALUE_1': 'sum',
'VALUE_2': 'mean'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 30 | 2.0 |
| A | 2018 | 40 | 4.5 |
| B | 2017 | 100 | 3.5 |
| B | 2018 | 110 | 6.5 |
"""
df = df.groupby(group_cols, as_index=False).agg(aggregations)
# When several aggregations are performed on the same column, pandas returns
# a multi-indexed dataframe, so we need to flatten the column index to get
# back to a unique level header
if df.columns.nlevels == 2:
level_0 = df.columns.get_level_values(0)
level_1 = df.columns.get_level_values(1)
new_columns = [(f'{x}_{y}' if x else y) for (x, y)
in zip(level_1, level_0)]
df.columns = new_columns
return df
|
def groupby(df, *, group_cols: Union[str, List[str]],
aggregations: Dict[str, Union[str, List[str]]]):
"""
Aggregate values by groups.
---
### Parameters
*mandatory :*
- `group_cols` (*list*): list of columns used to group data
- `aggregations` (*dict*): dictionary of value columns to aggregate as keys and aggregation
functions to use as values (See the [list of aggregation functions](
https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation))
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 60 | 6 |
```cson
groupby:
group_cols: ['ENTITY', 'YEAR']
aggregations:
'VALUE_1': 'sum',
'VALUE_2': 'mean'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 30 | 2.0 |
| A | 2018 | 40 | 4.5 |
| B | 2017 | 100 | 3.5 |
| B | 2018 | 110 | 6.5 |
"""
df = df.groupby(group_cols, as_index=False).agg(aggregations)
# When several aggregations are performed on the same column, pandas returns
# a multi-indexed dataframe, so we need to flatten the column index to get
# back to a unique level header
if df.columns.nlevels == 2:
level_0 = df.columns.get_level_values(0)
level_1 = df.columns.get_level_values(1)
new_columns = [(f'{x}_{y}' if x else y) for (x, y)
in zip(level_1, level_0)]
df.columns = new_columns
return df
|
[
"Aggregate",
"values",
"by",
"groups",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/groupby.py#L4-L65
|
[
"def",
"groupby",
"(",
"df",
",",
"*",
",",
"group_cols",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
",",
"aggregations",
":",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
")",
":",
"df",
"=",
"df",
".",
"groupby",
"(",
"group_cols",
",",
"as_index",
"=",
"False",
")",
".",
"agg",
"(",
"aggregations",
")",
"# When several aggregations are performed on the same column, pandas return",
"# a multi-indexed dataframe, so we need to flatten the columns index to get",
"# back to a unique level header",
"if",
"df",
".",
"columns",
".",
"nlevels",
"==",
"2",
":",
"level_0",
"=",
"df",
".",
"columns",
".",
"get_level_values",
"(",
"0",
")",
"level_1",
"=",
"df",
".",
"columns",
".",
"get_level_values",
"(",
"1",
")",
"new_columns",
"=",
"[",
"(",
"f'{x}_{y}'",
"if",
"x",
"else",
"y",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"zip",
"(",
"level_1",
",",
"level_0",
")",
"]",
"df",
".",
"columns",
"=",
"new_columns",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
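Editor's note — a minimal usage sketch for `groupby` (keyword-only arguments), reproducing the docstring example.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.groupby import groupby

df = pd.DataFrame({
    'ENTITY': ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'B'],
    'YEAR': [2017, 2017, 2018, 2018, 2017, 2017, 2018, 2018],
    'VALUE_1': [10, 20, 10, 30, 60, 40, 50, 60],
    'VALUE_2': [3, 1, 5, 4, 4, 3, 7, 6],
})
df = groupby(df, group_cols=['ENTITY', 'YEAR'],
             aggregations={'VALUE_1': 'sum', 'VALUE_2': 'mean'})
# -> the four aggregated rows shown in the docstring output table
```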
test
|
cumsum
|
DEPRECATED - please use `compute_cumsum` instead
|
toucan_data_sdk/utils/postprocess/cumsum.py
|
def cumsum(df, new_column: str, column: str, index: list, date_column: str, date_format: str):
"""
DEPRECATED - please use `compute_cumsum` instead
"""
logging.getLogger(__name__).warning(f"DEPRECATED: use compute_cumsum")
date_temp = '__date_temp__'
if isinstance(index, str):
index = [index]
levels = list(range(0, len(index)))
df[date_temp] = pd.to_datetime(df[date_column], format=date_format)
reference_cols = [date_temp, date_column]
df = df.groupby(index + reference_cols).sum()
df[new_column] = df.groupby(level=levels)[column].cumsum()
df.reset_index(inplace=True)
del df[date_temp]
return df
|
def cumsum(df, new_column: str, column: str, index: list, date_column: str, date_format: str):
"""
DEPRECATED - please use `compute_cumsum` instead
"""
logging.getLogger(__name__).warning(f"DEPRECATED: use compute_cumsum")
date_temp = '__date_temp__'
if isinstance(index, str):
index = [index]
levels = list(range(0, len(index)))
df[date_temp] = pd.to_datetime(df[date_column], format=date_format)
reference_cols = [date_temp, date_column]
df = df.groupby(index + reference_cols).sum()
df[new_column] = df.groupby(level=levels)[column].cumsum()
df.reset_index(inplace=True)
del df[date_temp]
return df
|
[
"DEPRECATED",
"-",
"please",
"use",
"compute_cumsum",
"instead"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/cumsum.py#L5-L21
|
[
"def",
"cumsum",
"(",
"df",
",",
"new_column",
":",
"str",
",",
"column",
":",
"str",
",",
"index",
":",
"list",
",",
"date_column",
":",
"str",
",",
"date_format",
":",
"str",
")",
":",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"warning",
"(",
"f\"DEPRECATED: use compute_cumsum\"",
")",
"date_temp",
"=",
"'__date_temp__'",
"if",
"isinstance",
"(",
"index",
",",
"str",
")",
":",
"index",
"=",
"[",
"index",
"]",
"levels",
"=",
"list",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"index",
")",
")",
")",
"df",
"[",
"date_temp",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"date_column",
"]",
",",
"format",
"=",
"date_format",
")",
"reference_cols",
"=",
"[",
"date_temp",
",",
"date_column",
"]",
"df",
"=",
"df",
".",
"groupby",
"(",
"index",
"+",
"reference_cols",
")",
".",
"sum",
"(",
")",
"df",
"[",
"new_column",
"]",
"=",
"df",
".",
"groupby",
"(",
"level",
"=",
"levels",
")",
"[",
"column",
"]",
".",
"cumsum",
"(",
")",
"df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
")",
"del",
"df",
"[",
"date_temp",
"]",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
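Editor's note — a sketch of the deprecated `cumsum`, which logs a warning and computes a per-group cumulative sum ordered by the parsed date (prefer `compute_cumsum`, as the docstring says). Column names here are illustrative.
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.cumsum import cumsum

df = pd.DataFrame({
    'COUNTRY': ['FR', 'FR', 'FR'],
    'DAY': ['2018-01-01', '2018-01-02', '2018-01-03'],
    'VALUE': [1, 2, 3],
})
df = cumsum(df, new_column='MY_CUMSUM', column='VALUE',
            index=['COUNTRY'], date_column='DAY', date_format='%Y-%m-%d')
# df['MY_CUMSUM'] -> [1, 3, 6]
```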
test
|
add_missing_row
|
Add missing rows to a df based on a reference column
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of the columns used to create each group
- `reference_col` (*str*): name of the column used to identify missing rows
*optional :*
- `complete_index` (*list* or *dict*): a list of values (e.g. `[A, B, C]`) used to add missing rows.
It can also be a dict to declare a date range.
By default, use all values of reference_col.
- `method` (*str*): by default all missing rows are added. The possible values are :
- `"between"` : add missing rows having their value between min and max values for each group,
- `"between_and_after"` : add missing rows having their value greater than the min value of each group.
- `"between_and_before"` : add missing rows having their value smaller than the max value of each group.
- `cols_to_keep` (*list of str*): names of other columns to keep, linked to the reference_col.
---
### Example
**Input**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|3|B
```cson
add_missing_row:
id_cols: ['NAME']
reference_col: 'MONTH'
```
**Output**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|2|B
2017|3|B
|
toucan_data_sdk/utils/generic/add_missing_row.py
|
def add_missing_row(
df: pd.DataFrame,
id_cols: List[str],
reference_col: str,
complete_index: Union[Dict[str, str], List[str]] = None,
method: str = None,
cols_to_keep: List[str] = None
) -> pd.DataFrame:
"""
Add missing rows to a df based on a reference column
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of the columns used to create each group
- `reference_col` (*str*): name of the column used to identify missing rows
*optional :*
- `complete_index` (*list* or *dict*): a list of values (e.g. `[A, B, C]`) used to add missing rows.
It can also be a dict to declare a date range.
By default, use all values of reference_col.
- `method` (*str*): by default all missing rows are added. The possible values are :
- `"between"` : add missing rows having their value between min and max values for each group,
- `"between_and_after"` : add missing rows having their value greater than the min value of each group.
- `"between_and_before"` : add missing rows having their value smaller than the max value of each group.
- `cols_to_keep` (*list of str*): names of other columns to keep, linked to the reference_col.
---
### Example
**Input**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|3|B
```cson
add_missing_row:
id_cols: ['NAME']
reference_col: 'MONTH'
```
**Output**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|2|B
2017|3|B
"""
if cols_to_keep is None:
cols_for_index = [reference_col]
else:
cols_for_index = [reference_col] + cols_to_keep
check_params_columns_duplicate(id_cols + cols_for_index)
if method == 'between' or method == 'between_and_after':
df['start'] = df.groupby(id_cols)[reference_col].transform(min)
id_cols += ['start']
if method == 'between' or method == 'between_and_before':
df['end'] = df.groupby(id_cols)[reference_col].transform(max)
id_cols += ['end']
names = id_cols + cols_for_index
new_df = df.set_index(names)
index_values = df.groupby(id_cols).sum().index.values
if complete_index is None:
complete_index = df.groupby(cols_for_index).sum().index.values
elif isinstance(complete_index, dict):
if complete_index['type'] == 'date':
freq = complete_index['freq']
date_format = complete_index['format']
start = complete_index['start']
end = complete_index['end']
if isinstance(freq, dict):
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
complete_index = pd.date_range(start=start, end=end, freq=freq)
complete_index = complete_index.strftime(date_format)
else:
raise ParamsValueError(f'Unknown complete index type: '
f'{complete_index["type"]}')
if not isinstance(index_values[0], tuple):
index_values = [(x,) for x in index_values]
if not isinstance(complete_index[0], tuple):
complete_index = [(x,) for x in complete_index]
new_tuples_index = [x + y for x in index_values for y in complete_index]
new_index = pd.MultiIndex.from_tuples(new_tuples_index, names=names)
new_df = new_df.reindex(new_index).reset_index()
if method == 'between' or method == 'between_and_after':
new_df = new_df[new_df[reference_col] >= new_df['start']]
del new_df['start']
if method == 'between' or method == 'between_and_before':
new_df = new_df[new_df[reference_col] <= new_df['end']]
del new_df['end']
return new_df
|
def add_missing_row(
df: pd.DataFrame,
id_cols: List[str],
reference_col: str,
complete_index: Union[Dict[str, str], List[str]] = None,
method: str = None,
cols_to_keep: List[str] = None
) -> pd.DataFrame:
"""
Add missing rows to a df based on a reference column
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of the columns used to create each group
- `reference_col` (*str*): name of the column used to identify missing rows
*optional :*
- `complete_index` (*list* or *dict*): [A, B, C] a list of values used to add missing rows.
It can also be a dict to declare a date range.
By default, use all values of reference_col.
- `method` (*str*): by default all missing rows are added. The possible values are :
- `"between"` : add missing rows having their value between min and max values for each group,
- `"between_and_after"` : add missing rows having their value bigger than min value for each group.
- `"between_and_before"` : add missing rows having their value smaller than max values for each group.
- `cols_to_keep` (*list of str*): name of other columns to keep, linked to the reference_col.
---
### Example
**Input**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|3|B
```cson
add_missing_row:
id_cols: ['NAME']
reference_col: 'MONTH'
```
**Output**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|2|B
2017|3|B
"""
if cols_to_keep is None:
cols_for_index = [reference_col]
else:
cols_for_index = [reference_col] + cols_to_keep
check_params_columns_duplicate(id_cols + cols_for_index)
if method == 'between' or method == 'between_and_after':
df['start'] = df.groupby(id_cols)[reference_col].transform(min)
id_cols += ['start']
if method == 'between' or method == 'between_and_before':
df['end'] = df.groupby(id_cols)[reference_col].transform(max)
id_cols += ['end']
names = id_cols + cols_for_index
new_df = df.set_index(names)
index_values = df.groupby(id_cols).sum().index.values
if complete_index is None:
complete_index = df.groupby(cols_for_index).sum().index.values
elif isinstance(complete_index, dict):
if complete_index['type'] == 'date':
freq = complete_index['freq']
date_format = complete_index['format']
start = complete_index['start']
end = complete_index['end']
if isinstance(freq, dict):
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
complete_index = pd.date_range(start=start, end=end, freq=freq)
complete_index = complete_index.strftime(date_format)
else:
raise ParamsValueError(f'Unknown complete index type: '
f'{complete_index["type"]}')
if not isinstance(index_values[0], tuple):
index_values = [(x,) for x in index_values]
if not isinstance(complete_index[0], tuple):
complete_index = [(x,) for x in complete_index]
new_tuples_index = [x + y for x in index_values for y in complete_index]
new_index = pd.MultiIndex.from_tuples(new_tuples_index, names=names)
new_df = new_df.reindex(new_index).reset_index()
if method == 'between' or method == 'between_and_after':
new_df = new_df[new_df[reference_col] >= new_df['start']]
del new_df['start']
if method == 'between' or method == 'between_and_before':
new_df = new_df[new_df[reference_col] <= new_df['end']]
del new_df['end']
return new_df
|
[
"Add",
"missing",
"row",
"to",
"a",
"df",
"base",
"on",
"a",
"reference",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/add_missing_row.py#L11-L121
|
[
"def",
"add_missing_row",
"(",
"df",
":",
"pd",
".",
"DataFrame",
",",
"id_cols",
":",
"List",
"[",
"str",
"]",
",",
"reference_col",
":",
"str",
",",
"complete_index",
":",
"Union",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"method",
":",
"str",
"=",
"None",
",",
"cols_to_keep",
":",
"List",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"pd",
".",
"DataFrame",
":",
"if",
"cols_to_keep",
"is",
"None",
":",
"cols_for_index",
"=",
"[",
"reference_col",
"]",
"else",
":",
"cols_for_index",
"=",
"[",
"reference_col",
"]",
"+",
"cols_to_keep",
"check_params_columns_duplicate",
"(",
"id_cols",
"+",
"cols_for_index",
")",
"if",
"method",
"==",
"'between'",
"or",
"method",
"==",
"'between_and_after'",
":",
"df",
"[",
"'start'",
"]",
"=",
"df",
".",
"groupby",
"(",
"id_cols",
")",
"[",
"reference_col",
"]",
".",
"transform",
"(",
"min",
")",
"id_cols",
"+=",
"[",
"'start'",
"]",
"if",
"method",
"==",
"'between'",
"or",
"method",
"==",
"'between_and_before'",
":",
"df",
"[",
"'end'",
"]",
"=",
"df",
".",
"groupby",
"(",
"id_cols",
")",
"[",
"reference_col",
"]",
".",
"transform",
"(",
"max",
")",
"id_cols",
"+=",
"[",
"'end'",
"]",
"names",
"=",
"id_cols",
"+",
"cols_for_index",
"new_df",
"=",
"df",
".",
"set_index",
"(",
"names",
")",
"index_values",
"=",
"df",
".",
"groupby",
"(",
"id_cols",
")",
".",
"sum",
"(",
")",
".",
"index",
".",
"values",
"if",
"complete_index",
"is",
"None",
":",
"complete_index",
"=",
"df",
".",
"groupby",
"(",
"cols_for_index",
")",
".",
"sum",
"(",
")",
".",
"index",
".",
"values",
"elif",
"isinstance",
"(",
"complete_index",
",",
"dict",
")",
":",
"if",
"complete_index",
"[",
"'type'",
"]",
"==",
"'date'",
":",
"freq",
"=",
"complete_index",
"[",
"'freq'",
"]",
"date_format",
"=",
"complete_index",
"[",
"'format'",
"]",
"start",
"=",
"complete_index",
"[",
"'start'",
"]",
"end",
"=",
"complete_index",
"[",
"'end'",
"]",
"if",
"isinstance",
"(",
"freq",
",",
"dict",
")",
":",
"freq",
"=",
"pd",
".",
"DateOffset",
"(",
"*",
"*",
"{",
"k",
":",
"int",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"freq",
".",
"items",
"(",
")",
"}",
")",
"complete_index",
"=",
"pd",
".",
"date_range",
"(",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"freq",
"=",
"freq",
")",
"complete_index",
"=",
"complete_index",
".",
"strftime",
"(",
"date_format",
")",
"else",
":",
"raise",
"ParamsValueError",
"(",
"f'Unknown complete index type: '",
"f'{complete_index[\"type\"]}'",
")",
"if",
"not",
"isinstance",
"(",
"index_values",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"index_values",
"=",
"[",
"(",
"x",
",",
")",
"for",
"x",
"in",
"index_values",
"]",
"if",
"not",
"isinstance",
"(",
"complete_index",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"complete_index",
"=",
"[",
"(",
"x",
",",
")",
"for",
"x",
"in",
"complete_index",
"]",
"new_tuples_index",
"=",
"[",
"x",
"+",
"y",
"for",
"x",
"in",
"index_values",
"for",
"y",
"in",
"complete_index",
"]",
"new_index",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"new_tuples_index",
",",
"names",
"=",
"names",
")",
"new_df",
"=",
"new_df",
".",
"reindex",
"(",
"new_index",
")",
".",
"reset_index",
"(",
")",
"if",
"method",
"==",
"'between'",
"or",
"method",
"==",
"'between_and_after'",
":",
"new_df",
"=",
"new_df",
"[",
"new_df",
"[",
"reference_col",
"]",
">=",
"new_df",
"[",
"'start'",
"]",
"]",
"del",
"new_df",
"[",
"'start'",
"]",
"if",
"method",
"==",
"'between'",
"or",
"method",
"==",
"'between_and_before'",
":",
"new_df",
"=",
"new_df",
"[",
"new_df",
"[",
"reference_col",
"]",
"<=",
"new_df",
"[",
"'end'",
"]",
"]",
"del",
"new_df",
"[",
"'end'",
"]",
"return",
"new_df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
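To make the docstring example above concrete, a minimal sketch (assuming `add_missing_row` and its `check_params_columns_duplicate` helper are importable from the module shown):

```python
import pandas as pd

df = pd.DataFrame({
    'YEAR': [2017] * 5,
    'MONTH': [1, 2, 3, 1, 3],
    'NAME': ['A', 'A', 'A', 'B', 'B'],
})

# Appends the missing (NAME='B', MONTH=2) row; columns outside
# id_cols/reference_col (here YEAR) are NaN on added rows
out = add_missing_row(df, id_cols=['NAME'], reference_col='MONTH')
```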
test
|
extract_zip
|
Returns:
dict: Dict[str, DataFrame]
|
toucan_data_sdk/sdk.py
|
def extract_zip(zip_file_path):
"""
Returns:
dict: Dict[str, DataFrame]
"""
dfs = {}
with zipfile.ZipFile(zip_file_path, mode='r') as z_file:
names = z_file.namelist()
for name in names:
content = z_file.read(name)
_, tmp_file_path = tempfile.mkstemp()
try:
with open(tmp_file_path, 'wb') as tmp_file:
tmp_file.write(content)
dfs[name] = joblib.load(tmp_file_path)
finally:
shutil.rmtree(tmp_file_path, ignore_errors=True)
return dfs
|
def extract_zip(zip_file_path):
"""
Returns:
dict: Dict[str, DataFrame]
"""
dfs = {}
with zipfile.ZipFile(zip_file_path, mode='r') as z_file:
names = z_file.namelist()
for name in names:
content = z_file.read(name)
_, tmp_file_path = tempfile.mkstemp()
try:
with open(tmp_file_path, 'wb') as tmp_file:
tmp_file.write(content)
dfs[name] = joblib.load(tmp_file_path)
finally:
shutil.rmtree(tmp_file_path, ignore_errors=True)
return dfs
|
[
"Returns",
":",
"dict",
":",
"Dict",
"[",
"str",
"DataFrame",
"]"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/sdk.py#L173-L191
|
[
"def",
"extract_zip",
"(",
"zip_file_path",
")",
":",
"dfs",
"=",
"{",
"}",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zip_file_path",
",",
"mode",
"=",
"'r'",
")",
"as",
"z_file",
":",
"names",
"=",
"z_file",
".",
"namelist",
"(",
")",
"for",
"name",
"in",
"names",
":",
"content",
"=",
"z_file",
".",
"read",
"(",
"name",
")",
"_",
",",
"tmp_file_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"try",
":",
"with",
"open",
"(",
"tmp_file_path",
",",
"'wb'",
")",
"as",
"tmp_file",
":",
"tmp_file",
".",
"write",
"(",
"content",
")",
"dfs",
"[",
"name",
"]",
"=",
"joblib",
".",
"load",
"(",
"tmp_file_path",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmp_file_path",
",",
"ignore_errors",
"=",
"True",
")",
"return",
"dfs"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
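A sketch of the round trip `extract_zip` expects: a zip archive whose entries are joblib-serialized DataFrames (file names here are hypothetical):

```python
import zipfile

import joblib
import pandas as pd

# Build an archive in the expected layout
joblib.dump(pd.DataFrame({'a': [1, 2]}), 'sales')
with zipfile.ZipFile('data.zip', mode='w') as z_file:
    z_file.write('sales')

dfs = extract_zip('data.zip')  # {'sales': <DataFrame>}
```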
test
|
extract
|
Args:
data (str | byte):
Returns:
dict: Dict[str, DataFrame]
|
toucan_data_sdk/sdk.py
|
def extract(data):
"""
Args:
data (str | byte):
Returns:
dict: Dict[str, DataFrame]
"""
_, tmp_file_path = tempfile.mkstemp()
try:
with open(tmp_file_path, 'wb') as tmp_file:
tmp_file.write(data)
if zipfile.is_zipfile(tmp_file_path):
return extract_zip(tmp_file_path)
else:
raise DataSdkError('Unsupported file type')
finally:
shutil.rmtree(tmp_file_path, ignore_errors=True)
|
def extract(data):
"""
Args:
data (str | byte):
Returns:
dict: Dict[str, DataFrame]
"""
_, tmp_file_path = tempfile.mkstemp()
try:
with open(tmp_file_path, 'wb') as tmp_file:
tmp_file.write(data)
if zipfile.is_zipfile(tmp_file_path):
return extract_zip(tmp_file_path)
else:
raise DataSdkError('Unsupported file type')
finally:
shutil.rmtree(tmp_file_path, ignore_errors=True)
|
[
"Args",
":",
"data",
"(",
"str",
"|",
"byte",
")",
":"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/sdk.py#L194-L213
|
[
"def",
"extract",
"(",
"data",
")",
":",
"_",
",",
"tmp_file_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"try",
":",
"with",
"open",
"(",
"tmp_file_path",
",",
"'wb'",
")",
"as",
"tmp_file",
":",
"tmp_file",
".",
"write",
"(",
"data",
")",
"if",
"zipfile",
".",
"is_zipfile",
"(",
"tmp_file_path",
")",
":",
"return",
"extract_zip",
"(",
"tmp_file_path",
")",
"else",
":",
"raise",
"DataSdkError",
"(",
"'Unsupported file type'",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmp_file_path",
",",
"ignore_errors",
"=",
"True",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
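`extract` takes the raw bytes rather than a path and dispatches to `extract_zip` when the payload is a zip; reusing the hypothetical `data.zip` built in the previous sketch:

```python
with open('data.zip', 'rb') as f:
    dfs = extract(f.read())  # same {'sales': <DataFrame>} mapping
```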
test
|
ToucanDataSdk.read_from_cache
|
Returns:
dict: Dict[str, DataFrame]
|
toucan_data_sdk/sdk.py
|
def read_from_cache(self, domains=None):
"""
Returns:
dict: Dict[str, DataFrame]
"""
logger.info(f'Reading data from cache ({self.EXTRACTION_CACHE_PATH})')
if domains is not None and isinstance(domains, list):
dfs = {domain: self.read_entry(domain) for domain in domains}
else:
dfs = {name: self.read_entry(name)
for name in os.listdir(self.EXTRACTION_CACHE_PATH)}
return dfs
|
def read_from_cache(self, domains=None):
"""
Returns:
dict: Dict[str, DataFrame]
"""
logger.info(f'Reading data from cache ({self.EXTRACTION_CACHE_PATH})')
if domains is not None and isinstance(domains, list):
dfs = {domain: self.read_entry(domain) for domain in domains}
else:
dfs = {name: self.read_entry(name)
for name in os.listdir(self.EXTRACTION_CACHE_PATH)}
return dfs
|
[
"Returns",
":",
"dict",
":",
"Dict",
"[",
"str",
"DataFrame",
"]"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/sdk.py#L114-L125
|
[
"def",
"read_from_cache",
"(",
"self",
",",
"domains",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"f'Reading data from cache ({self.EXTRACTION_CACHE_PATH})'",
")",
"if",
"domains",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"domains",
",",
"list",
")",
":",
"dfs",
"=",
"{",
"domain",
":",
"self",
".",
"read_entry",
"(",
"domain",
")",
"for",
"domain",
"in",
"domains",
"}",
"else",
":",
"dfs",
"=",
"{",
"name",
":",
"self",
".",
"read_entry",
"(",
"name",
")",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"EXTRACTION_CACHE_PATH",
")",
"}",
"return",
"dfs"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
ToucanDataSdk.read_entry
|
Args:
file_name (str):
Returns:
pd.DataFrame:
|
toucan_data_sdk/sdk.py
|
def read_entry(self, file_name):
"""
Args:
file_name (str):
Returns:
pd.DataFrame:
"""
file_path = os.path.join(self.EXTRACTION_CACHE_PATH, file_name)
logger.info(f'Reading cache entry: {file_path}')
return joblib.load(file_path)
|
def read_entry(self, file_name):
"""
Args:
file_name (str):
Returns:
pd.DataFrame:
"""
file_path = os.path.join(self.EXTRACTION_CACHE_PATH, file_name)
logger.info(f'Reading cache entry: {file_path}')
return joblib.load(file_path)
|
[
"Args",
":",
"file_name",
"(",
"str",
")",
":"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/sdk.py#L127-L137
|
[
"def",
"read_entry",
"(",
"self",
",",
"file_name",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"EXTRACTION_CACHE_PATH",
",",
"file_name",
")",
"logger",
".",
"info",
"(",
"f'Reading cache entry: {file_path}'",
")",
"return",
"joblib",
".",
"load",
"(",
"file_path",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
ToucanDataSdk.write
|
Args:
dfs (dict): Dict[str, DataFrame]
|
toucan_data_sdk/sdk.py
|
def write(self, dfs):
"""
Args:
dfs (dict): Dict[str, DataFrame]
"""
if not os.path.exists(self.EXTRACTION_CACHE_PATH):
os.makedirs(self.EXTRACTION_CACHE_PATH)
for name, df in dfs.items():
file_path = os.path.join(self.EXTRACTION_CACHE_PATH, name)
joblib.dump(df, filename=file_path)
logger.info(f'Cache entry added: {file_path}')
|
def write(self, dfs):
"""
Args:
dfs (dict): Dict[str, DataFrame]
"""
if not os.path.exists(self.EXTRACTION_CACHE_PATH):
os.makedirs(self.EXTRACTION_CACHE_PATH)
for name, df in dfs.items():
file_path = os.path.join(self.EXTRACTION_CACHE_PATH, name)
joblib.dump(df, filename=file_path)
logger.info(f'Cache entry added: {file_path}')
|
[
"Args",
":",
"data",
"(",
"str",
"|",
"byte",
")",
":"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/sdk.py#L139-L153
|
[
"def",
"write",
"(",
"self",
",",
"dfs",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"EXTRACTION_CACHE_PATH",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"EXTRACTION_CACHE_PATH",
")",
"for",
"name",
",",
"df",
"in",
"dfs",
".",
"items",
"(",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"EXTRACTION_CACHE_PATH",
",",
"name",
")",
"joblib",
".",
"dump",
"(",
"df",
",",
"filename",
"=",
"file_path",
")",
"logger",
".",
"info",
"(",
"f'Cache entry added: {file_path}'",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
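Taken together, `write`, `read_from_cache` and `read_entry` form a simple joblib-backed cache keyed by domain name. A hedged sketch, assuming `sdk` is an already-constructed `ToucanDataSdk` instance:

```python
import pandas as pd

dfs = {'sales': pd.DataFrame({'a': [1, 2]})}

sdk.write(dfs)                   # one joblib file per domain
all_dfs = sdk.read_from_cache()  # every cached domain
sales = sdk.read_entry('sales')  # a single domain
```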
test
|
clean_dataframe
|
This method is used to:
- slugify the column names (if is_slugify is set to True)
- convert columns to 'category' (if len(unique) < threshold) or 'int'
- clean the dataframe and rename if necessary
|
toucan_data_sdk/utils/generic/clean.py
|
def clean_dataframe(df, is_slugify=True, threshold=50, rename_cols=None):
"""
This method is used to:
- slugify the column names (if is_slugify is set to True)
- convert columns to 'category' (if len(unique) < threshold) or 'int'
- clean the dataframe and rename if necessary
"""
if is_slugify:
df = df.rename(columns=slugify)
df = df.dropna(axis=1, how='all')
for column in get_category_cols(df, threshold=threshold):
df[column] = df[column].astype('category')
for column in get_int_cols(df):
df[column] = df[column].astype(int)
if rename_cols is not None:
df = df.rename(columns=rename_cols)
return df
|
def clean_dataframe(df, is_slugify=True, threshold=50, rename_cols=None):
"""
This method is used to:
- slugify the column names (if is_slugify is set to True)
- convert columns to 'category' (if len(unique) < threshold) or 'int'
- clean the dataframe and rename if necessary
"""
if is_slugify:
df = df.rename(columns=slugify)
df = df.dropna(axis=1, how='all')
for column in get_category_cols(df, threshold=threshold):
df[column] = df[column].astype('category')
for column in get_int_cols(df):
df[column] = df[column].astype(int)
if rename_cols is not None:
df = df.rename(columns=rename_cols)
return df
|
[
"This",
"method",
"is",
"used",
"to",
":",
"-",
"slugify",
"the",
"column",
"names",
"(",
"if",
"slugify",
"is",
"set",
"to",
"True",
")",
"-",
"convert",
"columns",
"to",
"category",
"(",
"if",
"len",
"(",
"unique",
")",
"<",
"threshold",
")",
"or",
"int",
"-",
"clean",
"the",
"dataframe",
"and",
"rename",
"if",
"necessary"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/clean.py#L14-L33
|
[
"def",
"clean_dataframe",
"(",
"df",
",",
"is_slugify",
"=",
"True",
",",
"threshold",
"=",
"50",
",",
"rename_cols",
"=",
"None",
")",
":",
"if",
"is_slugify",
":",
"df",
"=",
"df",
".",
"rename",
"(",
"columns",
"=",
"slugify",
")",
"df",
"=",
"df",
".",
"dropna",
"(",
"axis",
"=",
"1",
",",
"how",
"=",
"'all'",
")",
"for",
"column",
"in",
"get_category_cols",
"(",
"df",
",",
"threshold",
"=",
"threshold",
")",
":",
"df",
"[",
"column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"astype",
"(",
"'category'",
")",
"for",
"column",
"in",
"get_int_cols",
"(",
"df",
")",
":",
"df",
"[",
"column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"astype",
"(",
"int",
")",
"if",
"rename_cols",
"is",
"not",
"None",
":",
"df",
"=",
"df",
".",
"rename",
"(",
"columns",
"=",
"rename_cols",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
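A usage sketch (sample data hypothetical; the exact slugified names depend on the module's `slugify` helper):

```python
import pandas as pd

df = pd.DataFrame({
    'Country Name': ['France', 'Spain', 'France'],
    'Amount': [1.0, 2.0, 3.0],
})

# column names are slugified, low-cardinality columns may become 'category',
# and whole-number float columns may be cast to int
out = clean_dataframe(df, threshold=2)
```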
test
|
compute_ffill_by_group
|
Compute `ffill` with `groupby`
Dedicated method as there is a performance issue with a simple groupby/fillna (2017/07)
The method `ffill` propagates last valid value forward to next values.
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of columns used to create each group.
- `reference_cols` (*list of str*): names of columns used to sort.
- `value_col` (*str*): name of the column to fill.
---
### Example
**Input**
name | rank | value
:------:|:--------------:|:--------:
A | 1 | 2
A | 2 | 5
A | 3 | null
B | 1 | null
B | 2 | 7
```cson
compute_ffill_by_group:
id_cols: ['name']
reference_cols: ['rank']
value_col: 'value'
```
**Output**
name | rank | value
:------:|:--------------:|:--------:
A | 1 | 2
A | 2 | 5
A | 3 | 5
B | 1 | null
B | 2 | 7
|
toucan_data_sdk/utils/generic/compute_ffill_by_group.py
|
def compute_ffill_by_group(
df,
id_cols: List[str],
reference_cols: List[str],
value_col: str
):
"""
Compute `ffill` with `groupby`
Dedicated method as there is a performance issue with a simple groupby/fillna (2017/07)
The method `ffill` propagates last valid value forward to next values.
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of columns used to create each group.
- `reference_cols` (*list of str*): names of columns used to sort.
- `value_col` (*str*): name of the column to fill.
---
### Example
**Input**
name | rank | value
:------:|:--------------:|:--------:
A | 1 | 2
A | 2 | 5
A | 3 | null
B | 1 | null
B | 2 | 7
```cson
compute_ffill_by_group:
id_cols: ['name']
reference_cols: ['rank']
value_col: 'value'
```
**Output**
name | rank | value
:------:|:--------------:|:--------:
A | 1 | 2
A | 2 | 5
A | 3 | 5
B | 1 | null
B | 2 | 7
"""
check_params_columns_duplicate(id_cols + reference_cols + [value_col])
df = df.sort_values(by=id_cols + reference_cols)
df = df.set_index(id_cols)
df['fill'] = 1 - df[value_col].isnull().astype(int)
df['fill'] = df.groupby(
level=list(range(0, len(id_cols) - 1))
)['fill'].cumsum()
df[value_col] = df[value_col].ffill()
df.loc[df['fill'] == 0, value_col] = None
del df['fill']
return df.reset_index()
|
def compute_ffill_by_group(
df,
id_cols: List[str],
reference_cols: List[str],
value_col: str
):
"""
Compute `ffill` with `groupby`
Dedicated method as there is a performance issue with a simple groupby/fillna (2017/07)
The method `ffill` propagates last valid value forward to next values.
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of columns used to create each group.
- `reference_cols` (*list of str*): names of columns used to sort.
- `value_col` (*str*): name of the column to fill.
---
### Example
**Input**
name | rank | value
:------:|:--------------:|:--------:
A | 1 | 2
A | 2 | 5
A | 3 | null
B | 1 | null
B | 2 | 7
```cson
compute_ffill_by_group:
id_cols: ['name']
reference_cols: ['rank']
value_col: 'value'
```
**Output**
name | rank | value
:------:|:--------------:|:--------:
A | 1 | 2
A | 2 | 5
A | 3 | 5
B | 1 | null
B | 2 | 7
"""
check_params_columns_duplicate(id_cols + reference_cols + [value_col])
df = df.sort_values(by=id_cols + reference_cols)
df = df.set_index(id_cols)
df['fill'] = 1 - df[value_col].isnull().astype(int)
df['fill'] = df.groupby(
level=list(range(0, len(id_cols) - 1))
)['fill'].cumsum()
df[value_col] = df[value_col].ffill()
df.loc[df['fill'] == 0, value_col] = None
del df['fill']
return df.reset_index()
|
[
"Compute",
"ffill",
"with",
"groupby",
"Dedicated",
"method",
"as",
"there",
"is",
"a",
"performance",
"issue",
"with",
"a",
"simple",
"groupby",
"/",
"fillna",
"(",
"2017",
"/",
"07",
")",
"The",
"method",
"ffill",
"propagates",
"last",
"valid",
"value",
"forward",
"to",
"next",
"values",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/compute_ffill_by_group.py#L6-L67
|
[
"def",
"compute_ffill_by_group",
"(",
"df",
",",
"id_cols",
":",
"List",
"[",
"str",
"]",
",",
"reference_cols",
":",
"List",
"[",
"str",
"]",
",",
"value_col",
":",
"str",
")",
":",
"check_params_columns_duplicate",
"(",
"id_cols",
"+",
"reference_cols",
"+",
"[",
"value_col",
"]",
")",
"df",
"=",
"df",
".",
"sort_values",
"(",
"by",
"=",
"id_cols",
"+",
"reference_cols",
")",
"df",
"=",
"df",
".",
"set_index",
"(",
"id_cols",
")",
"df",
"[",
"'fill'",
"]",
"=",
"1",
"-",
"df",
"[",
"value_col",
"]",
".",
"isnull",
"(",
")",
".",
"astype",
"(",
"int",
")",
"df",
"[",
"'fill'",
"]",
"=",
"df",
".",
"groupby",
"(",
"level",
"=",
"list",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"id_cols",
")",
"-",
"1",
")",
")",
")",
"[",
"'fill'",
"]",
".",
"cumsum",
"(",
")",
"df",
"[",
"value_col",
"]",
"=",
"df",
"[",
"value_col",
"]",
".",
"ffill",
"(",
")",
"df",
".",
"loc",
"[",
"df",
"[",
"'fill'",
"]",
"==",
"0",
",",
"value_col",
"]",
"=",
"None",
"del",
"df",
"[",
"'fill'",
"]",
"return",
"df",
".",
"reset_index",
"(",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
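For sanity-checking, the behaviour in the docstring can be reproduced with plain pandas: sort, then forward-fill within each group (sample data mirrors the docstring example; this is the straightforward form the optimized function exists to speed up):

```python
import pandas as pd

df = pd.DataFrame({
    'name': ['A', 'A', 'A', 'B', 'B'],
    'rank': [1, 2, 3, 1, 2],
    'value': [2, 5, None, None, 7],
})

df = df.sort_values(['name', 'rank'])
df['value'] = df.groupby('name')['value'].ffill()
# -> A: 2, 5, 5 ; B: NaN, 7 (no earlier value to propagate for B)
```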
test
|
fake_data_generator
|
`conf` is a list of dictionaries like
{'type': 'label', 'values': ['Paris', 'Marseille', 'Lyons'], 'name': 'Cities'}
and each dictionary will add a column.
There are two different behaviours:
- type: 'label' -> the new column will be taken into account for a cartesian product
with all other labels
- type: 'number' -> the new column will contain simple numbers
|
toucan_data_sdk/fakir/fake_data_generator.py
|
def fake_data_generator(conf: List[dict]) -> pd.DataFrame:
"""
`conf` is a list of dictionaries like
{'type': 'label', 'values': ['Paris', 'Marseille', 'Lyons'], 'name': 'Cities'}
and each dictionary will add a column.
There are two different behaviours:
- type: 'label' -> the new column will be taken into account for a cartesian product
with all other labels
- type: 'number' -> the new column will contain simple numbers
"""
# First create all the lines with the cartesian product of all the
# possible values of 'label' columns
label_confs = [x for x in conf if x['type'] == 'label']
label_names = [x['name'] for x in label_confs]
label_values = [x['values'] for x in label_confs]
df = pd.DataFrame(list(product(*label_values)), columns=label_names)
# Then add all the 'number' columns
number_confs = [x for x in conf if x['type'] == 'number']
for num_conf in number_confs:
num_column = np.random.uniform(low=num_conf['min'], high=num_conf['max'], size=df.shape[0])
df[num_conf['name']] = num_column.round(num_conf.get('digits', 4))
return df
|
def fake_data_generator(conf: List[dict]) -> pd.DataFrame:
"""
`conf` is a list of dictionaries like
{'type': 'label', 'values': ['Paris', 'Marseille', 'Lyons'], 'name': 'Cities'}
and each dictionary will add a column.
There are two different behaviours:
- type: 'label' -> the new column will be taken into account for a cartesian product
with all other labels
- type: 'number' -> the new column will contain simple numbers
"""
# First create all the lines with the cartesian product of all the
# possible values of 'label' columns
label_confs = [x for x in conf if x['type'] == 'label']
label_names = [x['name'] for x in label_confs]
label_values = [x['values'] for x in label_confs]
df = pd.DataFrame(list(product(*label_values)), columns=label_names)
# Then add all the 'number' columns
number_confs = [x for x in conf if x['type'] == 'number']
for num_conf in number_confs:
num_column = np.random.uniform(low=num_conf['min'], high=num_conf['max'], size=df.shape[0])
df[num_conf['name']] = num_column.round(num_conf.get('digits', 4))
return df
|
[
"conf",
"is",
"a",
"list",
"of",
"dictionaries",
"like",
"{",
"type",
":",
"label",
"values",
":",
"[",
"Paris",
"Marseille",
"Lyons",
"]",
"name",
":",
"Cities",
"}",
"and",
"each",
"dictionary",
"will",
"add",
"a",
"column",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/fakir/fake_data_generator.py#L8-L33
|
[
"def",
"fake_data_generator",
"(",
"conf",
":",
"List",
"[",
"dict",
"]",
")",
"->",
"pd",
".",
"DataFrame",
":",
"# First create all the lines with the cartesian product of all the",
"# possible values of 'label' columns",
"label_confs",
"=",
"[",
"x",
"for",
"x",
"in",
"conf",
"if",
"x",
"[",
"'type'",
"]",
"==",
"'label'",
"]",
"label_names",
"=",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"label_confs",
"]",
"label_values",
"=",
"[",
"x",
"[",
"'values'",
"]",
"for",
"x",
"in",
"label_confs",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"list",
"(",
"product",
"(",
"*",
"label_values",
")",
")",
",",
"columns",
"=",
"label_names",
")",
"# Then add all the 'number' columns",
"number_confs",
"=",
"[",
"x",
"for",
"x",
"in",
"conf",
"if",
"x",
"[",
"'type'",
"]",
"==",
"'number'",
"]",
"for",
"num_conf",
"in",
"number_confs",
":",
"num_column",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"num_conf",
"[",
"'min'",
"]",
",",
"high",
"=",
"num_conf",
"[",
"'max'",
"]",
",",
"size",
"=",
"df",
".",
"shape",
"[",
"0",
"]",
")",
"df",
"[",
"num_conf",
"[",
"'name'",
"]",
"]",
"=",
"num_column",
".",
"round",
"(",
"num_conf",
".",
"get",
"(",
"'digits'",
",",
"4",
")",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
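A usage sketch: the two `label` entries yield a 3 x 2 cartesian product, and the `number` entry fills a random column over those 6 rows (field names are illustrative):

```python
conf = [
    {'type': 'label', 'name': 'Cities', 'values': ['Paris', 'Marseille', 'Lyons']},
    {'type': 'label', 'name': 'Year', 'values': [2016, 2017]},
    {'type': 'number', 'name': 'Price', 'min': 0, 'max': 100, 'digits': 2},
]

df = fake_data_generator(conf)  # 6 rows, one per (city, year) pair
```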
test
|
two_values_melt
|
Transforms one or multiple columns into rows.
Unlike the melt function, two value columns can be returned by
the function (e.g. an evolution column and a price column)
---
### Parameters
*mandatory :*
- `first_value_vars` (*list of str*): name of the columns corresponding to the first returned value column
- `second_value_vars` (*list of str*): name of the columns corresponding to the second returned value column
- `var_name` (*str*): name of the column containing values in first_value_vars
- `value_name` (*str*): suffix of the two value columns (suffix_first / suffix_second)
---
### Example
**Input**
| Region | avg | total | evo_avg | evo_total |
|:---------:|:--------:|:-----------:|:--------:|:-----------:|
| A | 50| 100 | 1 | 4 |
| B | 40 | 250 | 2 | 5 |
```cson
two_values_melt:
first_value_vars: ["avg", "total"]
second_value_vars: ["evo_avg", "evo_total"]
var_name: "type"
value_name: "value"
```
**Output**
| Region | type | value_first | value_second |
|:---------:|:--------:|:------------:|:-------------:|
| A | avg| 50 | 1 |
| A | total| 100 | 4 |
| B | avg| 40 | 2 |
| B | total| 250 | 5 |
|
toucan_data_sdk/utils/generic/two_values_melt.py
|
def two_values_melt(
df,
first_value_vars: List[str],
second_value_vars: List[str],
var_name: str,
value_name: str
):
"""
Transforms one or multiple columns into rows.
Unlike the melt function, two value columns can be returned by
the function (e.g. an evolution column and a price column)
---
### Parameters
*mandatory :*
- `first_value_vars` (*list of str*): name of the columns corresponding to the first returned value column
- `second_value_vars` (*list of str*): name of the columns corresponding to the second returned value column
- `var_name` (*str*): name of the column containing values in first_value_vars
- `value_name` (*str*): suffix of the two value columns (suffix_first / suffix_second)
---
### Example
**Input**
| Region | avg | total | evo_avg | evo_total |
|:---------:|:--------:|:-----------:|:--------:|:-----------:|
| A | 50| 100 | 1 | 4 |
| B | 40 | 250 | 2 | 5 |
```cson
two_values_melt:
first_value_vars: ["avg", "total"]
second_value_vars: ["evo_avg", "evo_total"]
var_name: "type"
value_name: "value"
```
**Output**
| Region | type | value_first | value_second |
|:---------:|:--------:|:------------:|:-------------:|
| A | avg| 50 | 1 |
| A | total| 100 | 4 |
| B | avg| 40 | 2 |
| B | total| 250 | 5 |
"""
value_name_first = value_name + '_first'
value_name_second = value_name + '_second'
# Melt on the first value columns
melt_first_value = pd.melt(df,
id_vars=[col for col in list(df) if
col not in first_value_vars],
value_vars=first_value_vars,
var_name=var_name,
value_name=value_name_first)
melt_first_value.drop(second_value_vars, axis=1, inplace=True)
# Melt on the second value columns
melt_second_value = pd.melt(df,
id_vars=[col for col in list(df) if
col not in second_value_vars],
value_vars=second_value_vars,
var_name=var_name,
value_name=value_name_second)
# Since there are two value columns, there is no need to keep the
# second_value_vars names. And it will make things easier for the merge.
normalize_types = {k: v for k, v in zip(second_value_vars, first_value_vars)}
melt_second_value.replace(normalize_types, inplace=True)
melt_second_value.drop(first_value_vars, axis=1, inplace=True)
on_cols = list(melt_first_value)
on_cols.remove(value_name_first)
return pd.merge(melt_first_value, melt_second_value, on=on_cols, how='outer')
|
def two_values_melt(
df,
first_value_vars: List[str],
second_value_vars: List[str],
var_name: str,
value_name: str
):
"""
Transforms one or multiple columns into rows.
Unlike the melt function, two value columns can be returned by
the function (e.g. an evolution column and a price column)
---
### Parameters
*mandatory :*
- `first_value_vars` (*list of str*): name of the columns corresponding to the first returned value column
- `second_value_vars` (*list of str*): name of the columns corresponding to the second returned value column
- `var_name` (*str*): name of the column containing values in first_value_vars
- `value_name` (*str*): suffix of the two value columns (suffix_first / suffix_second)
---
### Example
**Input**
| Region | avg | total | evo_avg | evo_total |
|:---------:|:--------:|:-----------:|:--------:|:-----------:|
| A | 50| 100 | 1 | 4 |
| B | 40 | 250 | 2 | 5 |
```cson
two_values_melt:
first_value_vars: ["avg", "total"]
second_value_vars: ["evo_avg", "evo_total"]
var_name: "type"
value_name: "value"
```
**Output**
| Region | type | value_first | value_second |
|:---------:|:--------:|:------------:|:-------------:|
| A | avg| 50 | 1 |
| A | total| 100 | 4 |
| B | avg| 40 | 2 |
| B | total| 250 | 5 |
"""
value_name_first = value_name + '_first'
value_name_second = value_name + '_second'
# Melt on the first value columns
melt_first_value = pd.melt(df,
id_vars=[col for col in list(df) if
col not in first_value_vars],
value_vars=first_value_vars,
var_name=var_name,
value_name=value_name_first)
melt_first_value.drop(second_value_vars, axis=1, inplace=True)
# Melt on the second value columns
melt_second_value = pd.melt(df,
id_vars=[col for col in list(df) if
col not in second_value_vars],
value_vars=second_value_vars,
var_name=var_name,
value_name=value_name_second)
# Since there are two value columns, there is no need to keep the
# second_value_vars names. And it will make things easier for the merge.
normalize_types = {k: v for k, v in zip(second_value_vars, first_value_vars)}
melt_second_value.replace(normalize_types, inplace=True)
melt_second_value.drop(first_value_vars, axis=1, inplace=True)
on_cols = list(melt_first_value)
on_cols.remove(value_name_first)
return pd.merge(melt_first_value, melt_second_value, on=on_cols, how='outer')
|
[
"Transforms",
"one",
"or",
"multiple",
"columns",
"into",
"rows",
".",
"Unlike",
"melt",
"function",
"two",
"value",
"columns",
"can",
"be",
"returned",
"by",
"the",
"function",
"(",
"e",
".",
"g",
".",
"an",
"evolution",
"column",
"and",
"a",
"price",
"column",
")"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/two_values_melt.py#L5-L83
|
[
"def",
"two_values_melt",
"(",
"df",
",",
"first_value_vars",
":",
"List",
"[",
"str",
"]",
",",
"second_value_vars",
":",
"List",
"[",
"str",
"]",
",",
"var_name",
":",
"str",
",",
"value_name",
":",
"str",
")",
":",
"value_name_first",
"=",
"value_name",
"+",
"'_first'",
"value_name_second",
"=",
"value_name",
"+",
"'_second'",
"# Melt on the first value columns",
"melt_first_value",
"=",
"pd",
".",
"melt",
"(",
"df",
",",
"id_vars",
"=",
"[",
"col",
"for",
"col",
"in",
"list",
"(",
"df",
")",
"if",
"col",
"not",
"in",
"first_value_vars",
"]",
",",
"value_vars",
"=",
"first_value_vars",
",",
"var_name",
"=",
"var_name",
",",
"value_name",
"=",
"value_name_first",
")",
"melt_first_value",
".",
"drop",
"(",
"second_value_vars",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"# Melt on the second value columns",
"melt_second_value",
"=",
"pd",
".",
"melt",
"(",
"df",
",",
"id_vars",
"=",
"[",
"col",
"for",
"col",
"in",
"list",
"(",
"df",
")",
"if",
"col",
"not",
"in",
"second_value_vars",
"]",
",",
"value_vars",
"=",
"second_value_vars",
",",
"var_name",
"=",
"var_name",
",",
"value_name",
"=",
"value_name_second",
")",
"# Since there are two value columns, there is no need to keep the",
"# second_value_vars names. And it will make things easier for the merge.",
"normalize_types",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"second_value_vars",
",",
"first_value_vars",
")",
"}",
"melt_second_value",
".",
"replace",
"(",
"normalize_types",
",",
"inplace",
"=",
"True",
")",
"melt_second_value",
".",
"drop",
"(",
"first_value_vars",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"on_cols",
"=",
"list",
"(",
"melt_first_value",
")",
"on_cols",
".",
"remove",
"(",
"value_name_first",
")",
"return",
"pd",
".",
"merge",
"(",
"melt_first_value",
",",
"melt_second_value",
",",
"on",
"=",
"on_cols",
",",
"how",
"=",
"'outer'",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
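The docstring example as a runnable sketch (assuming `two_values_melt` is in scope):

```python
import pandas as pd

df = pd.DataFrame({
    'Region': ['A', 'B'],
    'avg': [50, 40],
    'total': [100, 250],
    'evo_avg': [1, 2],
    'evo_total': [4, 5],
})

out = two_values_melt(
    df,
    first_value_vars=['avg', 'total'],
    second_value_vars=['evo_avg', 'evo_total'],
    var_name='type',
    value_name='value',
)
# columns: Region, type, value_first, value_second
```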
test
|
concat
|
Concatenate `columns` element-wise
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.cat.html) for more information
---
### Parameters
*mandatory :*
- `columns` (*list*): list of columns to concatenate (at least 2 columns)
- `new_column` (*str*): the destination column
*optional :*
- `sep` (*str*): the separator
|
toucan_data_sdk/utils/postprocess/text.py
|
def concat(
df,
*,
columns: List[str],
new_column: str,
sep: str = None
):
"""
Concatenate `columns` element-wise
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.cat.html) for more information
---
### Parameters
*mandatory :*
- `columns` (*list*): list of columns to concatenate (at least 2 columns)
- `new_column` (*str*): the destination column
*optional :*
- `sep` (*str*): the separator
"""
if len(columns) < 2:
raise ValueError('The `columns` parameter needs to have at least 2 columns')
first_col, *other_cols = columns
df.loc[:, new_column] = df[first_col].astype(str).str.cat(df[other_cols].astype(str), sep=sep)
return df
|
def concat(
df,
*,
columns: List[str],
new_column: str,
sep: str = None
):
"""
Concatenate `columns` element-wise
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.cat.html) for more information
---
### Parameters
*mandatory :*
- `columns` (*list*): list of columns to concatenate (at least 2 columns)
- `new_column` (*str*): the destination column
*optional :*
- `sep` (*str*): the separator
"""
if len(columns) < 2:
raise ValueError('The `columns` parameter needs to have at least 2 columns')
first_col, *other_cols = columns
df.loc[:, new_column] = df[first_col].astype(str).str.cat(df[other_cols].astype(str), sep=sep)
return df
|
[
"Concatenate",
"columns",
"element",
"-",
"wise",
"See",
"[",
"pandas",
"doc",
"]",
"(",
"https",
":",
"//",
"pandas",
".",
"pydata",
".",
"org",
"/",
"pandas",
"-",
"docs",
"/",
"stable",
"/",
"reference",
"/",
"api",
"/",
"pandas",
".",
"Series",
".",
"str",
".",
"cat",
".",
"html",
")",
"for",
"more",
"information"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/text.py#L452-L479
|
[
"def",
"concat",
"(",
"df",
",",
"*",
",",
"columns",
":",
"List",
"[",
"str",
"]",
",",
"new_column",
":",
"str",
",",
"sep",
":",
"str",
"=",
"None",
")",
":",
"if",
"len",
"(",
"columns",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'The `columns` parameter needs to have at least 2 columns'",
")",
"first_col",
",",
"",
"*",
"other_cols",
"=",
"columns",
"df",
".",
"loc",
"[",
":",
",",
"new_column",
"]",
"=",
"df",
"[",
"first_col",
"]",
".",
"astype",
"(",
"str",
")",
".",
"str",
".",
"cat",
"(",
"df",
"[",
"other_cols",
"]",
".",
"astype",
"(",
"str",
")",
",",
"sep",
"=",
"sep",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
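A usage sketch (hypothetical column names):

```python
import pandas as pd

df = pd.DataFrame({'first': ['John', 'Jane'], 'last': ['Doe', 'Roe']})
df = concat(df, columns=['first', 'last'], new_column='full', sep=' ')
# df['full'] -> 'John Doe', 'Jane Roe'
```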
test
|
contains
|
Test if pattern or regex is contained within strings of `column`
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.contains.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression.
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `na`: fill value for missing values.
- `regex` (*boolean*): default true
|
toucan_data_sdk/utils/postprocess/text.py
|
def contains(
df,
column: str,
*,
pat: str,
new_column: str = None,
case: bool = True,
na: Any = None,
regex: bool = True
):
"""
Test if pattern or regex is contained within strings of `column`
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.contains.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression.
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `na`: fill value for missing values.
- `regex` (*boolean*): default true
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.contains(pat, case=case, na=na, regex=regex)
return df
|
def contains(
df,
column: str,
*,
pat: str,
new_column: str = None,
case: bool = True,
na: Any = None,
regex: bool = True
):
"""
Test if pattern or regex is contained within strings of `column`
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.contains.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression.
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `na`: fill value for missing values.
- `regex` (*boolean*): default true
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.contains(pat, case=case, na=na, regex=regex)
return df
|
[
"Test",
"if",
"pattern",
"or",
"regex",
"is",
"contained",
"within",
"strings",
"of",
"column",
"See",
"[",
"pandas",
"doc",
"]",
"(",
"https",
":",
"//",
"pandas",
".",
"pydata",
".",
"org",
"/",
"pandas",
"-",
"docs",
"/",
"stable",
"/",
"reference",
"/",
"api",
"/",
"pandas",
".",
"Series",
".",
"str",
".",
"contains",
".",
"html",
")",
"for",
"more",
"information"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/text.py#L482-L513
|
[
"def",
"contains",
"(",
"df",
",",
"column",
":",
"str",
",",
"*",
",",
"pat",
":",
"str",
",",
"new_column",
":",
"str",
"=",
"None",
",",
"case",
":",
"bool",
"=",
"True",
",",
"na",
":",
"Any",
"=",
"None",
",",
"regex",
":",
"bool",
"=",
"True",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
".",
"loc",
"[",
":",
",",
"new_column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"str",
".",
"contains",
"(",
"pat",
",",
"case",
"=",
"case",
",",
"na",
"=",
"na",
",",
"regex",
"=",
"regex",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
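A usage sketch (hypothetical data):

```python
import pandas as pd

df = pd.DataFrame({'label': ['foo', 'bar', None]})
df = contains(df, 'label', pat='^f', na=False, new_column='starts_with_f')
# df['starts_with_f'] -> True, False, False
```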
test
|
repeat
|
Duplicate each string in `column` by the indicated number of times
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `times` (*int*): times to repeat the string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
|
toucan_data_sdk/utils/postprocess/text.py
|
def repeat(
df,
column: str,
*,
times: int,
new_column: str = None
):
"""
Duplicate each string in `column` by the indicated number of times
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `times` (*int*): times to repeat the string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.repeat(times)
return df
|
def repeat(
df,
column: str,
*,
times: int,
new_column: str = None
):
"""
Duplicate each string in `column` by the indicated number of times
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `times` (*int*): times to repeat the string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.repeat(times)
return df
|
[
"Duplicate",
"each",
"string",
"in",
"column",
"by",
"indicated",
"number",
"of",
"time",
"See",
"[",
"pandas",
"doc",
"]",
"(",
"https",
":",
"//",
"pandas",
".",
"pydata",
".",
"org",
"/",
"pandas",
"-",
"docs",
"/",
"stable",
"/",
"reference",
"/",
"api",
"/",
"pandas",
".",
"Series",
".",
"str",
".",
"repeat",
".",
"html",
")",
"for",
"more",
"information"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/text.py#L516-L541
|
[
"def",
"repeat",
"(",
"df",
",",
"column",
":",
"str",
",",
"*",
",",
"times",
":",
"int",
",",
"new_column",
":",
"str",
"=",
"None",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
".",
"loc",
"[",
":",
",",
"new_column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"str",
".",
"repeat",
"(",
"times",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
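A usage sketch (hypothetical data):

```python
import pandas as pd

df = pd.DataFrame({'word': ['ab', 'c']})
df = repeat(df, 'word', times=3)  # -> 'ababab', 'ccc'
```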
test
|
replace_pattern
|
Replace occurrences of pattern/regex in `column` with some other string
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression
- `repl` (*str*): replacement string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `regex` (*boolean*): default true
|
toucan_data_sdk/utils/postprocess/text.py
|
def replace_pattern(
df,
column: str,
*,
pat: str,
repl: str,
new_column: str = None,
case: bool = True,
regex: bool = True
):
"""
Replace occurrences of pattern/regex in `column` with some other string
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression
- `repl` (*str*): replacement string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `regex` (*boolean*): default true
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex)
return df
|
def replace_pattern(
df,
column: str,
*,
pat: str,
repl: str,
new_column: str = None,
case: bool = True,
regex: bool = True
):
"""
Replace occurrences of pattern/regex in `column` with some other string
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression
- `repl` (*str*): replacement string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `regex` (*boolean*): default true
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex)
return df
|
[
"Replace",
"occurrences",
"of",
"pattern",
"/",
"regex",
"in",
"column",
"with",
"some",
"other",
"string",
"See",
"[",
"pandas",
"doc",
"]",
"(",
"https",
":",
"//",
"pandas",
".",
"pydata",
".",
"org",
"/",
"pandas",
"-",
"docs",
"/",
"stable",
"/",
"reference",
"/",
"api",
"/",
"pandas",
".",
"Series",
".",
"str",
".",
"replace",
".",
"html",
")",
"for",
"more",
"information"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/text.py#L544-L575
|
[
"def",
"replace_pattern",
"(",
"df",
",",
"column",
":",
"str",
",",
"*",
",",
"pat",
":",
"str",
",",
"repl",
":",
"str",
",",
"new_column",
":",
"str",
"=",
"None",
",",
"case",
":",
"bool",
"=",
"True",
",",
"regex",
":",
"bool",
"=",
"True",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
".",
"loc",
"[",
":",
",",
"new_column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"str",
".",
"replace",
"(",
"pat",
",",
"repl",
",",
"case",
"=",
"case",
",",
"regex",
"=",
"regex",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
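A usage sketch (hypothetical data):

```python
import pandas as pd

df = pd.DataFrame({'phone': ['01-23-45', '67-89-01']})
df = replace_pattern(df, 'phone', pat='-', repl=' ')
# -> '01 23 45', '67 89 01'
```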
test
|
catch
|
Decorator that catches any exception raised by the wrapped function and doesn't re-raise it.
Logs a warning when the wrapped function fails.
Note:
We don't want possible exceptions during logging to be raised.
This is used to decorate any function that gets executed
before or after the execution of the decorated function.
|
toucan_data_sdk/utils/decorators.py
|
def catch(logger):
"""
Decorator that catches any exception raised by the wrapped function and doesn't re-raise it.
Logs a warning when the wrapped function fails.
Note:
We don't want possible exceptions during logging to be raised.
This is used to decorate any function that gets executed
before or after the execution of the decorated function.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
logger.warning(f"Exception raised in decorator: {func.__name__}")
return wrapper
return decorator
|
def catch(logger):
"""
Decorator that catches any exception raised by the wrapped function and doesn't re-raise it.
Logs a warning when the wrapped function fails.
Note:
We don't want possible exceptions during logging to be raised.
This is used to decorate any function that gets executed
before or after the execution of the decorated function.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
logger.warning(f"Exception raised in decorator: {func.__name__}")
return wrapper
return decorator
|
[
"Decorator",
"to",
"catch",
"an",
"exception",
"and",
"don",
"t",
"raise",
"it",
".",
"Logs",
"information",
"if",
"a",
"decorator",
"failed",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L61-L80
|
[
"def",
"catch",
"(",
"logger",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
":",
"logger",
".",
"warning",
"(",
"f\"Exception raised in decorator: {func.__name__}\"",
")",
"return",
"wrapper",
"return",
"decorator"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
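A sketch of the intended use: wrapping a side-effect helper so its own failures never break the main flow (the helper and logger names are hypothetical):

```python
import logging

logger = logging.getLogger(__name__)

@catch(logger)
def notify(job_name):
    # any exception raised here is swallowed and logged as a warning
    raise RuntimeError('monitoring backend unreachable')

notify('nightly_run')  # logs a warning, execution continues
```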
test
|
log_message
|
Decorator to log a message before executing a function
|
toucan_data_sdk/utils/decorators.py
|
def log_message(logger, message=""):
"""
Decorator to log a message before executing a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
_log_message(logger, func.__name__, message)
result = func(*args, **kwargs)
return result
return wrapper
return decorator
|
def log_message(logger, message=""):
"""
Decorator to log a message before executing a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
_log_message(logger, func.__name__, message)
result = func(*args, **kwargs)
return result
return wrapper
return decorator
|
[
"Decorator",
"to",
"log",
"a",
"message",
"before",
"executing",
"a",
"function"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L109-L120
|
[
"def",
"log_message",
"(",
"logger",
",",
"message",
"=",
"\"\"",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_log_message",
"(",
"logger",
",",
"func",
".",
"__name__",
",",
"message",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"result",
"return",
"wrapper",
"return",
"decorator"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
log_time
|
Decorator to log the execution time of a function
|
toucan_data_sdk/utils/decorators.py
|
def log_time(logger):
"""
Decorator to log the execution time of a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
_log_time(logger, func.__name__, start, end)
return result
return wrapper
return decorator
|
def log_time(logger):
"""
Decorator to log the execution time of a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
_log_time(logger, func.__name__, start, end)
return result
return wrapper
return decorator
|
[
"Decorator",
"to",
"log",
"the",
"execution",
"time",
"of",
"a",
"function"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L123-L136
|
[
"def",
"log_time",
"(",
"logger",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"_log_time",
"(",
"logger",
",",
"func",
".",
"__name__",
",",
"start",
",",
"end",
")",
"return",
"result",
"return",
"wrapper",
"return",
"decorator"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
log_shapes
|
Decorator to log the shapes of input and output dataframes
It considers all the dataframes passed either as arguments or keyword arguments as inputs
and all the dataframes returned as outputs.
|
toucan_data_sdk/utils/decorators.py
|
def log_shapes(logger):
"""
Decorator to log the shapes of input and output dataframes
It considers all the dataframes passed either as arguments or keyword arguments as inputs
and all the dataframes returned as outputs.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
input_shapes = _get_dfs_shapes(*args, **kwargs)
result = func(*args, **kwargs)
output_shapes = _get_dfs_shapes(result)
_log_shapes(logger, func.__name__, input_shapes, output_shapes)
return result
return wrapper
return decorator
|
def log_shapes(logger):
"""
Decorator to log the shapes of input and output dataframes
It considers all the dataframes passed either as arguments or keyword arguments as inputs
and all the dataframes returned as outputs.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
input_shapes = _get_dfs_shapes(*args, **kwargs)
result = func(*args, **kwargs)
output_shapes = _get_dfs_shapes(result)
_log_shapes(logger, func.__name__, input_shapes, output_shapes)
return result
return wrapper
return decorator
|
[
"Decorator",
"to",
"log",
"the",
"shapes",
"of",
"input",
"and",
"output",
"dataframes"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L139-L155
|
[
"def",
"log_shapes",
"(",
"logger",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"input_shapes",
"=",
"_get_dfs_shapes",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"output_shapes",
"=",
"_get_dfs_shapes",
"(",
"result",
")",
"_log_shapes",
"(",
"logger",
",",
"func",
".",
"__name__",
",",
"input_shapes",
",",
"output_shapes",
")",
"return",
"result",
"return",
"wrapper",
"return",
"decorator"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
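A minimal usage sketch for `log_shapes`, under the same import-path assumption as above; shapes are gathered by the module-internal `_get_dfs_shapes` helper.

```python
import logging
import pandas as pd

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.decorators import log_shapes

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@log_shapes(logger)
def drop_missing(df):
    return df.dropna()

# Input shape (3, 1) and output shape (2, 1) are collected from the
# arguments and the return value, then logged via _log_shapes.
drop_missing(pd.DataFrame({'a': [1.0, None, 3.0]}))
```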
|
test
|
log
|
Basic log decorator
Can be used as :
- @log (with default logger)
- @log(mylogger)
- @log(start_message='Hello !", logger=mylogger, end_message='Bye !')
|
toucan_data_sdk/utils/decorators.py
|
def log(logger=None, start_message='Starting...', end_message='Done...'):
"""
Basic log decorator
Can be used as :
- @log (with default logger)
- @log(mylogger)
- @log(start_message='Hello !", logger=mylogger, end_message='Bye !')
"""
def actual_log(f, real_logger=logger):
logger = real_logger or _logger
@wraps(f)
def timed(*args, **kwargs):
logger.info(f'{f.__name__} - {start_message}')
start = time.time()
res = f(*args, **kwargs)
end = time.time()
logger.info(f'{f.__name__} - {end_message} (took {end - start:.2f}s)')
return res
return timed
if callable(logger):
return actual_log(logger, real_logger=None)
return actual_log
|
def log(logger=None, start_message='Starting...', end_message='Done...'):
"""
Basic log decorator
Can be used as :
- @log (with default logger)
- @log(mylogger)
- @log(start_message='Hello !", logger=mylogger, end_message='Bye !')
"""
def actual_log(f, real_logger=logger):
logger = real_logger or _logger
@wraps(f)
def timed(*args, **kwargs):
logger.info(f'{f.__name__} - {start_message}')
start = time.time()
res = f(*args, **kwargs)
end = time.time()
logger.info(f'{f.__name__} - {end_message} (took {end - start:.2f}s)')
return res
return timed
if callable(logger):
return actual_log(logger, real_logger=None)
return actual_log
|
[
"Basic",
"log",
"decorator",
"Can",
"be",
"used",
"as",
":",
"-"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L158-L182
|
[
"def",
"log",
"(",
"logger",
"=",
"None",
",",
"start_message",
"=",
"'Starting...'",
",",
"end_message",
"=",
"'Done...'",
")",
":",
"def",
"actual_log",
"(",
"f",
",",
"real_logger",
"=",
"logger",
")",
":",
"logger",
"=",
"real_logger",
"or",
"_logger",
"@",
"wraps",
"(",
"f",
")",
"def",
"timed",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"info",
"(",
"f'{f.__name__} - {start_message}'",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"res",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"info",
"(",
"f'{f.__name__} - {end_message} (took {end - start:.2f}s)'",
")",
"return",
"res",
"return",
"timed",
"if",
"callable",
"(",
"logger",
")",
":",
"return",
"actual_log",
"(",
"logger",
",",
"real_logger",
"=",
"None",
")",
"return",
"actual_log"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
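A minimal sketch of both call forms of `log`, assuming the import path mirrors the file path above. The `callable(logger)` check in the source is what distinguishes the bare form from the parametrized one.

```python
import logging

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.decorators import log

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Bare form: `logger` receives the function itself, which is callable,
# so the decorator falls back to the module-level default logger.
@log
def step_a():
    pass

# Parametrized form: a Logger instance is not callable, so it is kept
# as the real logger and the custom messages are used.
@log(logger, start_message='Hello !', end_message='Bye !')
def step_b():
    pass

step_a()
step_b()
```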
|
test
|
domain
|
Allows you to apply a function f(df: DataFrame) -> DataFrame on dfs by specifying the key
E.g. instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
|
toucan_data_sdk/utils/decorators.py
|
def domain(domain_name):
"""
Allows you to apply a function f(df: DataFrame) -> DataFrame on dfs by specifying the key
E.g. instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
dfs, *args = args
if not isinstance(dfs, dict):
raise TypeError(f'{dfs} is not a dict')
df = dfs.pop(domain_name)
df = func(df, *args, **kwargs)
return {domain_name: df, **dfs}
return wrapper
return decorator
|
def domain(domain_name):
"""
Allows you to apply a function f(df: DataFrame) -> DataFrame on dfs by specifying the key
E.g. instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
dfs, *args = args
if not isinstance(dfs, dict):
raise TypeError(f'{dfs} is not a dict')
df = dfs.pop(domain_name)
df = func(df, *args, **kwargs)
return {domain_name: df, **dfs}
return wrapper
return decorator
|
[
"Allow",
"to",
"apply",
"a",
"function",
"f",
"(",
"df",
":",
"DataFrame",
")",
"-",
">",
"DataFrame",
")",
"on",
"dfs",
"by",
"specifying",
"the",
"key",
"E",
".",
"g",
"instead",
"of",
"writing",
":",
"def",
"process_domain1",
"(",
"dfs",
")",
":",
"df",
"=",
"dfs",
"[",
"domain1",
"]",
"#",
"actual",
"process",
"dfs",
"[",
"domain1",
"]",
"=",
"df",
"return",
"dfs"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L185-L213
|
[
"def",
"domain",
"(",
"domain_name",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"dfs",
",",
"",
"*",
"args",
"=",
"args",
"if",
"not",
"isinstance",
"(",
"dfs",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"f'{dfs} is not a dict'",
")",
"df",
"=",
"dfs",
".",
"pop",
"(",
"domain_name",
")",
"df",
"=",
"func",
"(",
"df",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"{",
"domain_name",
":",
"df",
",",
"*",
"*",
"dfs",
"}",
"return",
"wrapper",
"return",
"decorator"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
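A minimal usage sketch for `domain`, assuming the import path mirrors the file path above. The decorated function only ever sees the dataframe stored under the given key; the rest of the dict passes through untouched.

```python
import pandas as pd

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.decorators import domain

@domain('sales')
def double_sales(df):
    # Receives only dfs['sales'].
    df['value'] = df['value'] * 2
    return df

dfs = {
    'sales': pd.DataFrame({'value': [1, 2]}),
    'stock': pd.DataFrame({'value': [5]}),
}
dfs = double_sales(dfs)  # {'sales': doubled df, 'stock': unchanged df}
```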
|
test
|
cache
|
Avoid recomputing a function if its parameters and its source code haven't changed.
Args:
requires: list of dependencies (functions or function names)
disabled (bool): disable the cache mechanism for this function (useful if you
only want to use the dependency mechanism)
applied_on_method (bool): ignore the first argument (useful to ignore "self")
check_param (True, False or a str): the name of the parameter to check.
False to not check any of them.
True (default) to check all of them.
limit (int or None): number of cache entries to keep (no limit by default)
|
toucan_data_sdk/utils/decorators.py
|
def cache( # noqa: C901
requires=None,
disabled=False,
applied_on_method=False,
check_param=True,
limit=None
):
""" Avoid to recompute a function if its parameters and its source code doesnt have changed.
Args:
requires: list of dependencies (functions or function names)
disabled (bool): disable the cache mecanism for this function (useful if you
only want to use the dependency mecanism)
applied_on_method (bool): ignore the first argument (useful to ignore "self")
check_param (True, False or a str): the name of the parameter to check.
False to not check any of them.
True (default) to check all of them.
limit (int or None): number of cache entries to keep (no limit by default)
"""
if not requires:
requires = []
elif isinstance(requires, collections.Callable):
requires = [requires]
if not isinstance(check_param, (bool, str)):
raise TypeError("'check_param' must be a str (name of the param to check) or a bool")
if limit is not None and not isinstance(limit, int):
raise TypeError("'limit' must be an int (number of cache entries to keep) or None")
# We keep data in the function attributes so that this data
# is not erased between two calls:
if not hasattr(cache, 'funcs_references'):
cache.funcs_references = {} # dict of {function_name -> function_object (or None)}
if not hasattr(cache, 'dependencies'):
cache.dependencies = {} # dict of {function_name -> [list of function names]}
if not hasattr(cache, 'memories'):
cache.memories = {} # dict of {thread_id -> joblib.Memory object}
def decorator(func):
""" This code is executed when the augment module is read (when decorator is applied).
Here we populate cache.funcs_references and cache.dependencies to use them later. """
cache.funcs_references[func.__name__] = get_orig_function(func)
dependencies_names = []
for requirement in requires:
if isinstance(requirement, collections.Callable):
req_name = requirement.__name__
cache.funcs_references[req_name] = get_orig_function(requirement)
elif requirement not in cache.funcs_references:
req_name = requirement
cache.funcs_references[req_name] = None
dependencies_names.append(req_name)
cache.dependencies[func.__name__] = dependencies_names
@wraps(func)
def wrapper(*args, **kwargs):
""" This code is executed when a decorated function is actually executed.
It uses the previously built dependency tree (see above). """
current_memory = cache.memories.get(current_thread().name)
if disabled is True or current_memory is None:
return func(*args, **kwargs)
# if cache is enabled, we compute the md5 hash of the concatenated source codes
# of all the dependencies.
concatenated_source_code = ''
dependencies = resolve_dependencies(func.__name__, cache.dependencies)
for func_name in dependencies:
function = cache.funcs_references[func_name]
if function is None:
raise Exception(f"Can't get source code of function '{func_name}'")
source_code = get_func_sourcecode(function)
concatenated_source_code += source_code
md5_hash = md5(str.encode(concatenated_source_code)).hexdigest()
# Add extra parameters so that joblib checks they haven't changed:
tmp_extra_kwargs = {
'__func_dependencies_hash__': md5_hash,
'__original_func_name__': func.__name__,
}
if check_param is True:
kwargs.update(tmp_extra_kwargs)
if applied_on_method:
self_arg, args = args[0], args[1:]
@wraps(func)
def f(*args, **kwargs):
# delete the extra parameters that the underlying function doesn't expect:
for k in tmp_extra_kwargs.keys():
del kwargs[k]
if applied_on_method:
args = (self_arg,) + args
return func(*args, **kwargs)
f = current_memory.cache(f)
result = f(*args, **kwargs)
else:
if isinstance(check_param, str):
check_only_param_value = get_param_value_from_func_call(
param_name=check_param,
func=func,
call_args=args,
call_kwargs=kwargs,
)
tmp_extra_kwargs['__check_only__'] = check_only_param_value
@wraps(func)
def f(**tmp_extra_kwargs):
return func(*args, **kwargs)
f = current_memory.cache(f)
result = f(**tmp_extra_kwargs)
if limit is not None:
clean_cachedir_old_entries(f.store_backend, func.__name__, limit)
return result
return wrapper
return decorator
|
def cache( # noqa: C901
requires=None,
disabled=False,
applied_on_method=False,
check_param=True,
limit=None
):
""" Avoid to recompute a function if its parameters and its source code doesnt have changed.
Args:
requires: list of dependencies (functions or function names)
disabled (bool): disable the cache mecanism for this function (useful if you
only want to use the dependency mecanism)
applied_on_method (bool): ignore the first argument (useful to ignore "self")
check_param (True, False or a str): the name of the parameter to check.
False to not check any of them.
True (default) to check all of them.
limit (int or None): number of cache entries to keep (no limit by default)
"""
if not requires:
requires = []
elif isinstance(requires, collections.Callable):
requires = [requires]
if not isinstance(check_param, (bool, str)):
raise TypeError("'check_param' must be a str (name of the param to check) or a bool")
if limit is not None and not isinstance(limit, int):
raise TypeError("'limit' must be an int (number of cache entries to keep) or None")
# We keep data in the function attributes so that this data
# is not erased between two calls:
if not hasattr(cache, 'funcs_references'):
cache.funcs_references = {} # dict of {function_name -> function_object (or None)}
if not hasattr(cache, 'dependencies'):
cache.dependencies = {} # dict of {function_name -> [list of function names]}
if not hasattr(cache, 'memories'):
cache.memories = {} # dict of {thread_id -> joblib.Memory object}
def decorator(func):
""" This code is executed when the augment module is read (when decorator is applied).
Here we populate cache.funcs_references and cache.dependencies to use them later. """
cache.funcs_references[func.__name__] = get_orig_function(func)
dependencies_names = []
for requirement in requires:
if isinstance(requirement, collections.Callable):
req_name = requirement.__name__
cache.funcs_references[req_name] = get_orig_function(requirement)
elif requirement not in cache.funcs_references:
req_name = requirement
cache.funcs_references[req_name] = None
dependencies_names.append(req_name)
cache.dependencies[func.__name__] = dependencies_names
@wraps(func)
def wrapper(*args, **kwargs):
""" This code is executed when a decorated function is actually executed.
It uses the previously built dependency tree (see above). """
current_memory = cache.memories.get(current_thread().name)
if disabled is True or current_memory is None:
return func(*args, **kwargs)
# if cache is enabled, we compute the md5 hash of the concatenated source codes
# of all the dependencies.
concatenated_source_code = ''
dependencies = resolve_dependencies(func.__name__, cache.dependencies)
for func_name in dependencies:
function = cache.funcs_references[func_name]
if function is None:
raise Exception(f"Can't get source code of function '{func_name}'")
source_code = get_func_sourcecode(function)
concatenated_source_code += source_code
md5_hash = md5(str.encode(concatenated_source_code)).hexdigest()
# Add extra parameters so that joblib checks they haven't changed:
tmp_extra_kwargs = {
'__func_dependencies_hash__': md5_hash,
'__original_func_name__': func.__name__,
}
if check_param is True:
kwargs.update(tmp_extra_kwargs)
if applied_on_method:
self_arg, args = args[0], args[1:]
@wraps(func)
def f(*args, **kwargs):
# delete the extra parameters that the underlying function doesn't expect:
for k in tmp_extra_kwargs.keys():
del kwargs[k]
if applied_on_method:
args = (self_arg,) + args
return func(*args, **kwargs)
f = current_memory.cache(f)
result = f(*args, **kwargs)
else:
if isinstance(check_param, str):
check_only_param_value = get_param_value_from_func_call(
param_name=check_param,
func=func,
call_args=args,
call_kwargs=kwargs,
)
tmp_extra_kwargs['__check_only__'] = check_only_param_value
@wraps(func)
def f(**tmp_extra_kwargs):
return func(*args, **kwargs)
f = current_memory.cache(f)
result = f(**tmp_extra_kwargs)
if limit is not None:
clean_cachedir_old_entries(f.store_backend, func.__name__, limit)
return result
return wrapper
return decorator
|
[
"Avoid",
"to",
"recompute",
"a",
"function",
"if",
"its",
"parameters",
"and",
"its",
"source",
"code",
"doesnt",
"have",
"changed",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L219-L341
|
[
"def",
"cache",
"(",
"# noqa: C901",
"requires",
"=",
"None",
",",
"disabled",
"=",
"False",
",",
"applied_on_method",
"=",
"False",
",",
"check_param",
"=",
"True",
",",
"limit",
"=",
"None",
")",
":",
"if",
"not",
"requires",
":",
"requires",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"requires",
",",
"collections",
".",
"Callable",
")",
":",
"requires",
"=",
"[",
"requires",
"]",
"if",
"not",
"isinstance",
"(",
"check_param",
",",
"(",
"bool",
",",
"str",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"'check_param' must be a str (name of the param to check) or a bool\"",
")",
"if",
"limit",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"limit",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"'limit' must be an int (number of cache entries to keep) or None\"",
")",
"# We keep data in the function attributes so that this data",
"# is not erased between two calls:",
"if",
"not",
"hasattr",
"(",
"cache",
",",
"'funcs_references'",
")",
":",
"cache",
".",
"funcs_references",
"=",
"{",
"}",
"# dict of {function_name -> function_object (or None)}",
"if",
"not",
"hasattr",
"(",
"cache",
",",
"'dependencies'",
")",
":",
"cache",
".",
"dependencies",
"=",
"{",
"}",
"# dict of {function_name -> [list of function names]}",
"if",
"not",
"hasattr",
"(",
"cache",
",",
"'memories'",
")",
":",
"cache",
".",
"memories",
"=",
"{",
"}",
"# dict of {thread_id -> joblib.Memory object}",
"def",
"decorator",
"(",
"func",
")",
":",
"\"\"\" This code is executed when the augment module is read (when decorator is applied).\n Here we populate cache.funcs_references and cache.dependencies to use them later. \"\"\"",
"cache",
".",
"funcs_references",
"[",
"func",
".",
"__name__",
"]",
"=",
"get_orig_function",
"(",
"func",
")",
"dependencies_names",
"=",
"[",
"]",
"for",
"requirement",
"in",
"requires",
":",
"if",
"isinstance",
"(",
"requirement",
",",
"collections",
".",
"Callable",
")",
":",
"req_name",
"=",
"requirement",
".",
"__name__",
"cache",
".",
"funcs_references",
"[",
"req_name",
"]",
"=",
"get_orig_function",
"(",
"requirement",
")",
"elif",
"requirement",
"not",
"in",
"cache",
".",
"funcs_references",
":",
"req_name",
"=",
"requirement",
"cache",
".",
"funcs_references",
"[",
"req_name",
"]",
"=",
"None",
"dependencies_names",
".",
"append",
"(",
"req_name",
")",
"cache",
".",
"dependencies",
"[",
"func",
".",
"__name__",
"]",
"=",
"dependencies_names",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\" This code is executed when a decorated function is actually executed.\n It uses the previously built dependency tree (see above). \"\"\"",
"current_memory",
"=",
"cache",
".",
"memories",
".",
"get",
"(",
"current_thread",
"(",
")",
".",
"name",
")",
"if",
"disabled",
"is",
"True",
"or",
"current_memory",
"is",
"None",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# if cache is enabled, we compute the md5 hash of the concatenated source codes",
"# of all the dependencies.",
"concatenated_source_code",
"=",
"''",
"dependencies",
"=",
"resolve_dependencies",
"(",
"func",
".",
"__name__",
",",
"cache",
".",
"dependencies",
")",
"for",
"func_name",
"in",
"dependencies",
":",
"function",
"=",
"cache",
".",
"funcs_references",
"[",
"func_name",
"]",
"if",
"function",
"is",
"None",
":",
"raise",
"Exception",
"(",
"f\"Can't get source code of function '{func_name}'\"",
")",
"source_code",
"=",
"get_func_sourcecode",
"(",
"function",
")",
"concatenated_source_code",
"+=",
"source_code",
"md5_hash",
"=",
"md5",
"(",
"str",
".",
"encode",
"(",
"concatenated_source_code",
")",
")",
".",
"hexdigest",
"(",
")",
"# Add extra parameters so that joblib checks they didnt have changed:",
"tmp_extra_kwargs",
"=",
"{",
"'__func_dependencies_hash__'",
":",
"md5_hash",
",",
"'__original_func_name__'",
":",
"func",
".",
"__name__",
",",
"}",
"if",
"check_param",
"is",
"True",
":",
"kwargs",
".",
"update",
"(",
"tmp_extra_kwargs",
")",
"if",
"applied_on_method",
":",
"self_arg",
",",
"args",
"=",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
":",
"]",
"@",
"wraps",
"(",
"func",
")",
"def",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# delete the extra parameters that the underlying function doesnt expect:",
"for",
"k",
"in",
"tmp_extra_kwargs",
".",
"keys",
"(",
")",
":",
"del",
"kwargs",
"[",
"k",
"]",
"if",
"applied_on_method",
":",
"args",
"=",
"(",
"self_arg",
",",
")",
"+",
"args",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"f",
"=",
"current_memory",
".",
"cache",
"(",
"f",
")",
"result",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"if",
"isinstance",
"(",
"check_param",
",",
"str",
")",
":",
"check_only_param_value",
"=",
"get_param_value_from_func_call",
"(",
"param_name",
"=",
"check_param",
",",
"func",
"=",
"func",
",",
"call_args",
"=",
"args",
",",
"call_kwargs",
"=",
"kwargs",
",",
")",
"tmp_extra_kwargs",
"[",
"'__check_only__'",
"]",
"=",
"check_only_param_value",
"@",
"wraps",
"(",
"func",
")",
"def",
"f",
"(",
"*",
"*",
"tmp_extra_kwargs",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"f",
"=",
"current_memory",
".",
"cache",
"(",
"f",
")",
"result",
"=",
"f",
"(",
"*",
"*",
"tmp_extra_kwargs",
")",
"if",
"limit",
"is",
"not",
"None",
":",
"clean_cachedir_old_entries",
"(",
"f",
".",
"store_backend",
",",
"func",
".",
"__name__",
",",
"limit",
")",
"return",
"result",
"return",
"wrapper",
"return",
"decorator"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
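A minimal declaration sketch for `cache`, assuming the import path mirrors the file path above. It shows the dependency mechanism only; actual memoization requires a registered `joblib.Memory` (see `setup_cachedir` in the next record).

```python
# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.decorators import cache

def clean(df):
    return df.dropna()

# `prepare` calls `clean`, so `clean` is declared as a dependency: if
# the source code of either function changes, the concatenated-source
# md5 hash changes and the cached result is invalidated. At most 5
# cache entries are kept for `prepare`.
@cache(requires=[clean], limit=5)
def prepare(df):
    return clean(df).reset_index(drop=True)

# As long as no joblib.Memory is registered for the current thread,
# the wrapper simply calls `prepare` directly, without caching.
```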
|
test
|
setup_cachedir
|
This function injects a joblib.Memory object in the cache() function
(in a thread-specific slot of its 'memories' attribute).
|
toucan_data_sdk/utils/decorators.py
|
def setup_cachedir(cachedir, mmap_mode=None, bytes_limit=None):
""" This function injects a joblib.Memory object in the cache() function
(in a thread-specific slot of its 'memories' attribute). """
if not hasattr(cache, 'memories'):
cache.memories = {}
memory = joblib.Memory(
location=cachedir,
verbose=0,
mmap_mode=mmap_mode,
bytes_limit=bytes_limit,
)
cache.memories[current_thread().name] = memory
return memory
|
def setup_cachedir(cachedir, mmap_mode=None, bytes_limit=None):
""" This function injects a joblib.Memory object in the cache() function
(in a thread-specific slot of its 'memories' attribute). """
if not hasattr(cache, 'memories'):
cache.memories = {}
memory = joblib.Memory(
location=cachedir,
verbose=0,
mmap_mode=mmap_mode,
bytes_limit=bytes_limit,
)
cache.memories[current_thread().name] = memory
return memory
|
[
"This",
"function",
"injects",
"a",
"joblib",
".",
"Memory",
"object",
"in",
"the",
"cache",
"()",
"function",
"(",
"in",
"a",
"thread",
"-",
"specific",
"slot",
"of",
"its",
"memories",
"attribute",
")",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L347-L360
|
[
"def",
"setup_cachedir",
"(",
"cachedir",
",",
"mmap_mode",
"=",
"None",
",",
"bytes_limit",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"cache",
",",
"'memories'",
")",
":",
"cache",
".",
"memories",
"=",
"{",
"}",
"memory",
"=",
"joblib",
".",
"Memory",
"(",
"location",
"=",
"cachedir",
",",
"verbose",
"=",
"0",
",",
"mmap_mode",
"=",
"mmap_mode",
",",
"bytes_limit",
"=",
"bytes_limit",
",",
")",
"cache",
".",
"memories",
"[",
"current_thread",
"(",
")",
".",
"name",
"]",
"=",
"memory",
"return",
"memory"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
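A minimal sketch combining `setup_cachedir` with `cache`, under the same import-path assumption; it also assumes a joblib version that accepts the `location` and `bytes_limit` arguments used by the source above.

```python
import pandas as pd

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.decorators import cache, setup_cachedir

# Register a joblib.Memory for the current thread; @cache-decorated
# functions executed in this thread will now actually be memoized.
setup_cachedir('/tmp/toucan_cache_demo')

@cache()
def heavy_process(df):
    return df.groupby('label').sum()

df = pd.DataFrame({'label': ['a', 'a', 'b'], 'value': [1, 2, 3]})
heavy_process(df)  # computed, then stored on disk
heavy_process(df)  # same args + same source hash: served from cache
```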
|
test
|
melt
|
A melt will transform a dataset by creating a column "variable" and a column "value".
This function is useful to transform a dataset into a format where one or more columns
are identifier variables, while all other columns, considered measured
variables (value_vars), are “unpivoted” to the row axis, leaving just two
non-identifier columns, `"variable"` and `"value"`.
---
### Parameters
*mandatory :*
- `id` (*list of str*): names of the columns that must be kept in column.
- `value` (*list of str*): names of the columns that will be transformed in long format (in rows).
*optional :*
- `dropna` (*boolean*): It allows you to drop missing values.
---
### Example
**Input**
| my_label | my_value | my_column_1 | my_column_2 | info_1 | info_2 | info_3 |
|:--------:|:--------:|:-----------:|:-----------:|:------:|:------:|:------:|
| toto | 10 | S45 | Lalaland | 10 | 20 | None |
```cson
melt:
id: ['my_label', 'my_value', 'my_column_1', 'my_column_2']
value: ['info_1', 'info_2', 'info_3']
dropna: true
```
**Output**
| my_label | my_value | my_column_1 | my_column_2 | variable | value |
|:--------:|:--------:|:-----------:|:-----------:|:--------:|:------:|
| toto | 10 | S45 | Lalaland | info_1 | 10 |
| toto | 10 | S45 | Lalaland | info_2 | 20 |
|
toucan_data_sdk/utils/postprocess/melt.py
|
def melt(
df,
id: List[str],
value: List[str],
dropna=False
):
"""
A melt will transform a dataset by creating a column "variable" and a column "value".
This function is useful to transform a dataset into a format where one or more columns
are identifier variables, while all other columns, considered measured
variables (value_vars), are “unpivoted” to the row axis, leaving just two
non-identifier columns, `"variable"` and `"value"`.
---
### Parameters
*mandatory :*
- `id` (*list of str*): names of the columns that must be kept in column.
- `value` (*list of str*): names of the columns that will be transformed in long format (in rows).
*optional :*
- `dropna` (*boolean*): It allows you to drop missing values.
---
### Example
**Input**
| my_label | my_value | my_column_1 | my_column_2 | info_1 | info_2 | info_3 |
|:--------:|:--------:|:-----------:|:-----------:|:------:|:------:|:------:|
| toto | 10 | S45 | Lalaland | 10 | 20 | None |
```cson
melt:
id: ['my_label', 'my_value', 'my_column_1', 'my_column_2']
value: ['info_1', 'info_2', 'info_3']
dropna: true
```
**Output**
| my_label | my_value | my_column_1 | my_column_2 | variable | value |
|:--------:|:--------:|:-----------:|:-----------:|:--------:|:------:|
| toto | 10 | S45 | Lalaland | info_1 | 10 |
| toto | 10 | S45 | Lalaland | info_2 | 20 |
"""
df = df[(id + value)]
df = pd.melt(df, id_vars=id, value_vars=value)
if dropna:
df = df.dropna(subset=['value'])
return df
|
def melt(
df,
id: List[str],
value: List[str],
dropna=False
):
"""
A melt will transform a dataset by creating a column "variable" and a column "value".
This function is useful to transform a dataset into a format where one or more columns
are identifier variables, while all other columns, considered measured
variables (value_vars), are “unpivoted” to the row axis, leaving just two
non-identifier columns, `"variable"` and `"value"`.
---
### Parameters
*mandatory :*
- `id` (*list of str*): names of the columns that must be kept in column.
- `value` (*list of str*): names of the columns that will be transformed in long format (in rows).
*optional :*
- `dropna` (*boolean*): It allows you to drop missing values.
---
### Example
**Input**
| my_label | my_value | my_column_1 | my_column_2 | info_1 | info_2 | info_3 |
|:--------:|:--------:|:-----------:|:-----------:|:------:|:------:|:------:|
| toto | 10 | S45 | Lalaland | 10 | 20 | None |
```cson
melt:
id: ['my_label', 'my_value', 'my_column_1', 'my_column_2']
value: ['info_1', 'info_2', 'info_3']
dropna: true
```
**Output**
| my_label | my_value | my_column_1 | my_column_2 | variable | value |
|:--------:|:--------:|:-----------:|:-----------:|:--------:|:------:|
| toto | 10 | S45 | Lalaland | info_1 | 10 |
| toto | 10 | S45 | Lalaland | info_2 | 20 |
"""
df = df[(id + value)]
df = pd.melt(df, id_vars=id, value_vars=value)
if dropna:
df = df.dropna(subset=['value'])
return df
|
[
"A",
"melt",
"will",
"transform",
"a",
"dataset",
"by",
"creating",
"a",
"column",
"variable",
"and",
"a",
"column",
"value",
".",
"This",
"function",
"is",
"useful",
"to",
"transform",
"a",
"dataset",
"into",
"a",
"format",
"where",
"one",
"or",
"more",
"columns",
"are",
"identifier",
"variables",
"while",
"all",
"other",
"columns",
"considered",
"measured",
"variables",
"(",
"value_vars",
")",
"are",
"“unpivoted”",
"to",
"the",
"row",
"axis",
"leaving",
"just",
"two",
"non",
"-",
"identifier",
"columns",
"variable",
"and",
"value",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/melt.py#L6-L59
|
[
"def",
"melt",
"(",
"df",
",",
"id",
":",
"List",
"[",
"str",
"]",
",",
"value",
":",
"List",
"[",
"str",
"]",
",",
"dropna",
"=",
"False",
")",
":",
"df",
"=",
"df",
"[",
"(",
"id",
"+",
"value",
")",
"]",
"df",
"=",
"pd",
".",
"melt",
"(",
"df",
",",
"id_vars",
"=",
"id",
",",
"value_vars",
"=",
"value",
")",
"if",
"dropna",
":",
"df",
"=",
"df",
".",
"dropna",
"(",
"subset",
"=",
"[",
"'value'",
"]",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
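A runnable sketch of `melt`, assuming the import path mirrors the file path above (`toucan_data_sdk/utils/postprocess/melt.py`). It reproduces the docstring example with plain pandas data.

```python
import pandas as pd

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.postprocess.melt import melt

df = pd.DataFrame({
    'my_label': ['toto'],
    'my_value': [10],
    'info_1': [10],
    'info_2': [20],
    'info_3': [None],
})

# With dropna=True the info_3 row (None value) is dropped, leaving one
# row per remaining measured column plus 'variable'/'value' columns.
result = melt(
    df,
    id=['my_label', 'my_value'],
    value=['info_1', 'info_2', 'info_3'],
    dropna=True,
)
print(result)
```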
|
test
|
rename
|
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: term's translation
- `columns` (optional: dict):
- key: column name to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: column name's translation
- `locale` (optional: str): the locale you want to use.
By default the client locale is used.
---
### Example
**Input**
| label | value |
|:----------------:|:-----:|
| France | 100 |
| Europe wo France | 500 |
```cson
rename:
values:
'Europe wo France':
'en': 'Europe excl. France'
'fr': 'Europe excl. France'
columns:
'value':
'en': 'revenue'
'fr': 'revenue'
```
**Output**
| label | revenue |
|:-------------------:|:-------:|
| France | 100 |
| Europe excl. France | 500 |
|
toucan_data_sdk/utils/postprocess/rename.py
|
def rename(
df,
values: Dict[str, Dict[str, str]] = None,
columns: Dict[str, Dict[str, str]] = None,
locale: str = None
):
"""
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: term's translation
- `columns` (optional: dict):
- key: column name to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: column name's translation
- `locale` (optional: str): the locale you want to use.
By default the client locale is used.
---
### Example
**Input**
| label | value |
|:----------------:|:-----:|
| France | 100 |
| Europe wo France | 500 |
```cson
rename:
values:
'Europe wo France':
'en': 'Europe excl. France'
'fr': 'Europe excl. France'
columns:
'value':
'en': 'revenue'
'fr': 'revenue'
```
**Output**
| label | revenue |
|:-------------------:|:-------:|
| France | 100 |
| Europe excl. France | 500 |
"""
if values:
to_replace = list(values.keys())
value = [values[term][locale] for term in values]
df = df.replace(to_replace=to_replace, value=value)
if columns:
_keys = list(columns.keys())
_values = [column[locale] for column in columns.values()]
columns = dict(list(zip(_keys, _values)))
df = df.rename(columns=columns)
return df
|
def rename(
df,
values: Dict[str, Dict[str, str]] = None,
columns: Dict[str, Dict[str, str]] = None,
locale: str = None
):
"""
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: term's translation
- `columns` (optional: dict):
- key: column name to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: column name's translation
- `locale` (optional: str): the locale you want to use.
By default the client locale is used.
---
### Example
**Input**
| label | value |
|:----------------:|:-----:|
| France | 100 |
| Europe wo France | 500 |
```cson
rename:
values:
'Europe wo France':
'en': 'Europe excl. France'
'fr': 'Europe excl. France'
columns:
'value':
'en': 'revenue'
'fr': 'revenue'
```
**Output**
| label | revenue |
|:-------------------:|:-------:|
| France | 100 |
| Europe excl. France | 500 |
"""
if values:
to_replace = list(values.keys())
value = [values[term][locale] for term in values]
df = df.replace(to_replace=to_replace, value=value)
if columns:
_keys = list(columns.keys())
_values = [column[locale] for column in columns.values()]
columns = dict(list(zip(_keys, _values)))
df = df.rename(columns=columns)
return df
|
[
"Replaces",
"data",
"values",
"and",
"column",
"names",
"according",
"to",
"the",
"locale"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/rename.py#L4-L70
|
[
"def",
"rename",
"(",
"df",
",",
"values",
":",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"None",
",",
"columns",
":",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"None",
",",
"locale",
":",
"str",
"=",
"None",
")",
":",
"if",
"values",
":",
"to_replace",
"=",
"list",
"(",
"values",
".",
"keys",
"(",
")",
")",
"value",
"=",
"[",
"values",
"[",
"term",
"]",
"[",
"locale",
"]",
"for",
"term",
"in",
"values",
"]",
"df",
"=",
"df",
".",
"replace",
"(",
"to_replace",
"=",
"to_replace",
",",
"value",
"=",
"value",
")",
"if",
"columns",
":",
"_keys",
"=",
"list",
"(",
"columns",
".",
"keys",
"(",
")",
")",
"_values",
"=",
"[",
"column",
"[",
"locale",
"]",
"for",
"column",
"in",
"columns",
".",
"values",
"(",
")",
"]",
"columns",
"=",
"dict",
"(",
"list",
"(",
"zip",
"(",
"_keys",
",",
"_values",
")",
")",
")",
"df",
"=",
"df",
".",
"rename",
"(",
"columns",
"=",
"columns",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
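A runnable sketch of `rename`, under the same kind of import-path assumption; the locale is passed explicitly here, whereas in the app the client locale is injected.

```python
import pandas as pd

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.postprocess.rename import rename

df = pd.DataFrame({'label': ['France', 'Europe wo France'],
                   'value': [100, 500]})

result = rename(
    df,
    values={'Europe wo France': {'en': 'Europe excl. France'}},
    columns={'value': {'en': 'revenue'}},
    locale='en',
)
# result now has columns ['label', 'revenue'] and the translated label.
```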
|
test
|
compute_cumsum
|
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the id columns used to create each group
- `reference_cols` (*list*): the columns to order the cumsum
- `value_cols` (*list*): the columns to cumsum
*optional :*
- `new_value_cols` (*list*): the new columns with the result cumsum
- `cols_to_keep` (*list*): other columns to keep in the dataset.
This option can be used if there is only one row per group [id_cols + reference_cols]
---
### Example
**Input**
MONTH | DAY | NAME | VALUE | X
:---:|:---:|:--:|:---:|:---:
1 | 1 | A | 1 | lo
2 | 1 | A | 1 | lo
2 | 15 | A | 1 | la
1 | 15 | B | 1 | la
```cson
compute_cumsum:
id_cols: ['NAME']
reference_cols: ['MONTH', 'DAY']
value_cols: ['VALUE']
cols_to_keep: ['X']
```
**Output**
NAME | MONTH | DAY | X | VALUE
:---:|:---:|:--:|:---:|:---:
A | 1 | 1 | lo | 1
A | 2 | 1 | la | 2
A | 2 | 15 | lo | 3
B | 1 | 15 | la | 1
|
toucan_data_sdk/utils/generic/compute_cumsum.py
|
def compute_cumsum(
df,
id_cols: List[str],
reference_cols: List[str],
value_cols: List[str],
new_value_cols: List[str] = None,
cols_to_keep: List[str] = None
):
"""
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the id columns used to create each group
- `reference_cols` (*list*): the columns to order the cumsum
- `value_cols` (*list*): the columns to cumsum
*optional :*
- `new_value_cols` (*list*): the new columns with the result cumsum
- `cols_to_keep` (*list*): other columns to keep in the dataset.
This option can be used if there is only one row per group [id_cols + reference_cols]
---
### Example
**Input**
MONTH | DAY | NAME | VALUE | X
:---:|:---:|:--:|:---:|:---:
1 | 1 | A | 1 | lo
2 | 1 | A | 1 | lo
2 | 15 | A | 1 | la
1 | 15 | B | 1 | la
```cson
compute_cumsum:
id_cols: ['NAME']
reference_cols: ['MONTH', 'DAY']
value_cols: ['VALUE']
cols_to_keep: ['X']
```
**Output**
NAME | MONTH | DAY | X | VALUE
:---:|:---:|:--:|:---:|:---:
A | 1 | 1 | lo | 1
A | 2 | 1 | la | 2
A | 2 | 15 | lo | 3
B | 1 | 15 | la | 1
"""
if cols_to_keep is None:
cols_to_keep = []
if new_value_cols is None:
new_value_cols = value_cols
if len(value_cols) != len(new_value_cols):
raise ParamsValueError('`value_cols` and `new_value_cols` needs '
'to have the same number of elements')
check_params_columns_duplicate(id_cols + reference_cols + cols_to_keep + value_cols)
levels = list(range(0, len(id_cols)))
df = df.groupby(id_cols + reference_cols + cols_to_keep).sum()
df[new_value_cols] = df.groupby(level=levels)[value_cols].cumsum()
return df.reset_index()
|
def compute_cumsum(
df,
id_cols: List[str],
reference_cols: List[str],
value_cols: List[str],
new_value_cols: List[str] = None,
cols_to_keep: List[str] = None
):
"""
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the id columns used to create each group
- `reference_cols` (*list*): the columns to order the cumsum
- `value_cols` (*list*): the columns to cumsum
*optional :*
- `new_value_cols` (*list*): the new columns with the result cumsum
- `cols_to_keep` (*list*): other columns to keep in the dataset.
This option can be used if there is only one row per group [id_cols + reference_cols]
---
### Example
**Input**
MONTH | DAY | NAME | VALUE | X
:---:|:---:|:--:|:---:|:---:
1 | 1 | A | 1 | lo
2 | 1 | A | 1 | lo
2 | 15 | A | 1 | la
1 | 15 | B | 1 | la
```cson
compute_cumsum:
id_cols: ['NAME']
reference_cols: ['MONTH', 'DAY']
value_cols: ['VALUE']
cols_to_keep: ['X']
```
**Output**
NAME | MONTH | DAY | X | VALUE
:---:|:---:|:--:|:---:|:---:
A | 1 | 1 | lo | 1
A | 2 | 1 | la | 2
A | 2 | 15 | lo | 3
B | 1 | 15 | la | 1
"""
if cols_to_keep is None:
cols_to_keep = []
if new_value_cols is None:
new_value_cols = value_cols
if len(value_cols) != len(new_value_cols):
raise ParamsValueError('`value_cols` and `new_value_cols` needs '
'to have the same number of elements')
check_params_columns_duplicate(id_cols + reference_cols + cols_to_keep + value_cols)
levels = list(range(0, len(id_cols)))
df = df.groupby(id_cols + reference_cols + cols_to_keep).sum()
df[new_value_cols] = df.groupby(level=levels)[value_cols].cumsum()
return df.reset_index()
|
[
"Compute",
"cumsum",
"for",
"a",
"group",
"of",
"columns",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/compute_cumsum.py#L9-L80
|
[
"def",
"compute_cumsum",
"(",
"df",
",",
"id_cols",
":",
"List",
"[",
"str",
"]",
",",
"reference_cols",
":",
"List",
"[",
"str",
"]",
",",
"value_cols",
":",
"List",
"[",
"str",
"]",
",",
"new_value_cols",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"cols_to_keep",
":",
"List",
"[",
"str",
"]",
"=",
"None",
")",
":",
"if",
"cols_to_keep",
"is",
"None",
":",
"cols_to_keep",
"=",
"[",
"]",
"if",
"new_value_cols",
"is",
"None",
":",
"new_value_cols",
"=",
"value_cols",
"if",
"len",
"(",
"value_cols",
")",
"!=",
"len",
"(",
"new_value_cols",
")",
":",
"raise",
"ParamsValueError",
"(",
"'`value_cols` and `new_value_cols` needs '",
"'to have the same number of elements'",
")",
"check_params_columns_duplicate",
"(",
"id_cols",
"+",
"reference_cols",
"+",
"cols_to_keep",
"+",
"value_cols",
")",
"levels",
"=",
"list",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"id_cols",
")",
")",
")",
"df",
"=",
"df",
".",
"groupby",
"(",
"id_cols",
"+",
"reference_cols",
"+",
"cols_to_keep",
")",
".",
"sum",
"(",
")",
"df",
"[",
"new_value_cols",
"]",
"=",
"df",
".",
"groupby",
"(",
"level",
"=",
"levels",
")",
"[",
"value_cols",
"]",
".",
"cumsum",
"(",
")",
"return",
"df",
".",
"reset_index",
"(",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
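A runnable sketch of `compute_cumsum` reproducing the docstring example (without the extra `X` column), assuming the import path mirrors the file path above.

```python
import pandas as pd

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.generic.compute_cumsum import compute_cumsum

df = pd.DataFrame({
    'MONTH': [1, 2, 2, 1],
    'DAY': [1, 1, 15, 15],
    'NAME': ['A', 'A', 'A', 'B'],
    'VALUE': [1, 1, 1, 1],
})

# Cumulative sum of VALUE per NAME, ordered by (MONTH, DAY): the three
# 'A' rows become 1, 2, 3 and the single 'B' row stays 1.
result = compute_cumsum(
    df,
    id_cols=['NAME'],
    reference_cols=['MONTH', 'DAY'],
    value_cols=['VALUE'],
)
```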
|
test
|
combine_columns_aggregation
|
Aggregates data to reproduce "All" category for requester
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the id columns to group by
- `cols_for_combination` (*dict*): columns corresponding to
the filters as key and their default value as value
*optional :*
- `agg_func` (*str*, *list* or *dict*): the function(s) to use for aggregating the data.
Accepted combinations are:
- string function name
- list of functions and/or function names, e.g. [np.sum, 'mean']
- dict of axis labels -> functions, function names or list of such.
|
toucan_data_sdk/utils/generic/combine_columns_aggregation.py
|
def combine_columns_aggregation(
df,
id_cols: List[str],
cols_for_combination: Dict[str, str],
agg_func: Union[str, List[str], Dict[str, str]] = 'sum'
):
"""
Aggregates data to reproduce "All" category for requester
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the id columns to group by
- `cols_for_combination` (*dict*): columns corresponding to
the filters as key and their default value as value
*optional :*
- `agg_func` (*str*, *list* or *dict*): the function(s) to use for aggregating the data.
Accepted combinations are:
- string function name
- list of functions and/or function names, e.g. [np.sum, 'mean']
- dict of axis labels -> functions, function names or list of such.
"""
requesters_cols = list(cols_for_combination.keys())
requester_combination = [
list(item) for i in range(0, len(requesters_cols) + 1)
for item in itertools.combinations(requesters_cols, i)]
dfs_result = []
for comb in requester_combination:
df_tmp = df.groupby(id_cols + comb).agg(agg_func).reset_index()
for key in (set(cols_for_combination.keys()) - set(comb)):
df_tmp[key] = cols_for_combination[key]
dfs_result.append(df_tmp)
return pd.concat(dfs_result, sort=False, ignore_index=True)
|
def combine_columns_aggregation(
df,
id_cols: List[str],
cols_for_combination: Dict[str, str],
agg_func: Union[str, List[str], Dict[str, str]] = 'sum'
):
"""
Aggregates data to reproduce "All" category for requester
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the id columns to group by
- `cols_for_combination` (*dict*): columns corresponding to
the filters as key and their default value as value
*optional :*
- `agg_func` (*str*, *list* or *dict*): the function(s) to use for aggregating the data.
Accepted combinations are:
- string function name
- list of functions and/or function names, e.g. [np.sum, 'mean']
- dict of axis labels -> functions, function names or list of such.
"""
requesters_cols = list(cols_for_combination.keys())
requester_combination = [
list(item) for i in range(0, len(requesters_cols) + 1)
for item in itertools.combinations(requesters_cols, i)]
dfs_result = []
for comb in requester_combination:
df_tmp = df.groupby(id_cols + comb).agg(agg_func).reset_index()
for key in (set(cols_for_combination.keys()) - set(comb)):
df_tmp[key] = cols_for_combination[key]
dfs_result.append(df_tmp)
return pd.concat(dfs_result, sort=False, ignore_index=True)
|
[
"Aggregates",
"data",
"to",
"reproduce",
"All",
"category",
"for",
"requester"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/combine_columns_aggregation.py#L7-L43
|
[
"def",
"combine_columns_aggregation",
"(",
"df",
",",
"id_cols",
":",
"List",
"[",
"str",
"]",
",",
"cols_for_combination",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"agg_func",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
",",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"'sum'",
")",
":",
"requesters_cols",
"=",
"list",
"(",
"cols_for_combination",
".",
"keys",
"(",
")",
")",
"requester_combination",
"=",
"[",
"list",
"(",
"item",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"requesters_cols",
")",
"+",
"1",
")",
"for",
"item",
"in",
"itertools",
".",
"combinations",
"(",
"requesters_cols",
",",
"i",
")",
"]",
"dfs_result",
"=",
"[",
"]",
"for",
"comb",
"in",
"requester_combination",
":",
"df_tmp",
"=",
"df",
".",
"groupby",
"(",
"id_cols",
"+",
"comb",
")",
".",
"agg",
"(",
"agg_func",
")",
".",
"reset_index",
"(",
")",
"for",
"key",
"in",
"(",
"set",
"(",
"cols_for_combination",
".",
"keys",
"(",
")",
")",
"-",
"set",
"(",
"comb",
")",
")",
":",
"df_tmp",
"[",
"key",
"]",
"=",
"cols_for_combination",
"[",
"key",
"]",
"dfs_result",
".",
"append",
"(",
"df_tmp",
")",
"return",
"pd",
".",
"concat",
"(",
"dfs_result",
",",
"sort",
"=",
"False",
",",
"ignore_index",
"=",
"True",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
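A minimal sketch of `combine_columns_aggregation`, under the usual import-path assumption: for every subset of the requester columns, rows are aggregated and the columns left out of the subset are filled with their default value.

```python
import pandas as pd

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.generic.combine_columns_aggregation import (
    combine_columns_aggregation,
)

df = pd.DataFrame({
    'year': [2020, 2020, 2021],
    'country': ['FR', 'DE', 'FR'],
    'value': [1, 2, 3],
})

# Produces the original per-country rows plus "All countries" rows
# aggregated per year (the empty combination).
result = combine_columns_aggregation(
    df,
    id_cols=['year'],
    cols_for_combination={'country': 'All countries'},
)
```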
|
test
|
get_param_value_from_func_call
|
Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
>>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
33
|
toucan_data_sdk/utils/helpers.py
|
def get_param_value_from_func_call(param_name, func, call_args, call_kwargs):
"""
Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
>>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
33
"""
signature = inspect.signature(func)
params_list = signature.parameters.keys()
if param_name not in params_list:
raise TypeError(f"'{param_name}' not found in {func.__name__}"
f"parameters list ([{params_list}])")
call = signature.bind(*call_args, **call_kwargs)
call.apply_defaults()
return call.arguments[param_name]
|
def get_param_value_from_func_call(param_name, func, call_args, call_kwargs):
"""
Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
>>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
33
"""
signature = inspect.signature(func)
params_list = signature.parameters.keys()
if param_name not in params_list:
raise TypeError(f"'{param_name}' not found in {func.__name__}"
f"parameters list ([{params_list}])")
call = signature.bind(*call_args, **call_kwargs)
call.apply_defaults()
return call.arguments[param_name]
|
[
"Get",
"the",
"value",
"of",
"a",
"function",
"s",
"parameter",
"based",
"on",
"its",
"signature",
"and",
"the",
"call",
"s",
"args",
"and",
"kwargs",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/helpers.py#L25-L45
|
[
"def",
"get_param_value_from_func_call",
"(",
"param_name",
",",
"func",
",",
"call_args",
",",
"call_kwargs",
")",
":",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"func",
")",
"params_list",
"=",
"signature",
".",
"parameters",
".",
"keys",
"(",
")",
"if",
"param_name",
"not",
"in",
"params_list",
":",
"raise",
"TypeError",
"(",
"f\"'{param_name}' not found in {func.__name__}\"",
"f\"parameters list ([{params_list}])\"",
")",
"call",
"=",
"signature",
".",
"bind",
"(",
"*",
"call_args",
",",
"*",
"*",
"call_kwargs",
")",
"call",
".",
"apply_defaults",
"(",
")",
"return",
"call",
".",
"arguments",
"[",
"param_name",
"]"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
test
|
get_func_sourcecode
|
Try to get sourcecode using standard inspect.getsource().
If the function comes from a module which has been created dynamically
(not from the filesystem), then it tries to read the sourcecode on the
filesystem anyway.
WARNING: can do weird things if the filesystem code slightly differs from
the original module code.
|
toucan_data_sdk/utils/helpers.py
|
def get_func_sourcecode(func):
"""
Try to get sourcecode using standard inspect.getsource().
If the function comes from a module which has been created dynamically
(not from the filesystem), then it tries to read the sourcecode on the
filesystem anyway.
WARNING: can do weird things if the filesystem code slightly differs from
the original module code.
"""
def getsource(func):
lines, lnum = getsourcelines(func)
return ''.join(lines)
def getsourcelines(func):
lines, lnum = findsource(func)
return inspect.getblock(lines[lnum:]), lnum + 1
def findsource(func):
file = getfile(func) # file path
module = inspect.getmodule(func, file)
lines = linecache.getlines(file, module.__dict__)
code = func.__code__
lnum = code.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]):
break
lnum = lnum - 1 # pragma: no cover
return lines, lnum
def getfile(func):
module = inspect.getmodule(func)
return module.__file__
try:
return inspect.getsource(func)
except Exception:
return getsource(func)
|
def get_func_sourcecode(func):
"""
Try to get sourcecode using standard inspect.getsource().
If the function comes from a module which has been created dynamically
(not from the filesystem), then it tries to read the sourcecode on the
filesystem anyway.
WARNING: can do weird things if the filesystem code slightly differs from
the original module code.
"""
def getsource(func):
lines, lnum = getsourcelines(func)
return ''.join(lines)
def getsourcelines(func):
lines, lnum = findsource(func)
return inspect.getblock(lines[lnum:]), lnum + 1
def findsource(func):
file = getfile(func) # file path
module = inspect.getmodule(func, file)
lines = linecache.getlines(file, module.__dict__)
code = func.__code__
lnum = code.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]):
break
lnum = lnum - 1 # pragma: no cover
return lines, lnum
def getfile(func):
module = inspect.getmodule(func)
return module.__file__
try:
return inspect.getsource(func)
except Exception:
return getsource(func)
|
[
"Try",
"to",
"get",
"sourcecode",
"using",
"standard",
"inspect",
".",
"getsource",
"()",
".",
"If",
"the",
"function",
"comes",
"from",
"a",
"module",
"which",
"has",
"been",
"created",
"dynamically",
"(",
"not",
"from",
"the",
"filesystem",
")",
"then",
"it",
"tries",
"to",
"read",
"the",
"sourcecode",
"on",
"the",
"filesystem",
"anyway",
".",
"WARNING",
":",
"can",
"do",
"weird",
"things",
"if",
"the",
"filesystem",
"code",
"slightly",
"differs",
"from",
"the",
"original",
"module",
"code",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/helpers.py#L48-L86
|
[
"def",
"get_func_sourcecode",
"(",
"func",
")",
":",
"def",
"getsource",
"(",
"func",
")",
":",
"lines",
",",
"lnum",
"=",
"getsourcelines",
"(",
"func",
")",
"return",
"''",
".",
"join",
"(",
"lines",
")",
"def",
"getsourcelines",
"(",
"func",
")",
":",
"lines",
",",
"lnum",
"=",
"findsource",
"(",
"func",
")",
"return",
"inspect",
".",
"getblock",
"(",
"lines",
"[",
"lnum",
":",
"]",
")",
",",
"lnum",
"+",
"1",
"def",
"findsource",
"(",
"func",
")",
":",
"file",
"=",
"getfile",
"(",
"func",
")",
"# file path",
"module",
"=",
"inspect",
".",
"getmodule",
"(",
"func",
",",
"file",
")",
"lines",
"=",
"linecache",
".",
"getlines",
"(",
"file",
",",
"module",
".",
"__dict__",
")",
"code",
"=",
"func",
".",
"__code__",
"lnum",
"=",
"code",
".",
"co_firstlineno",
"-",
"1",
"pat",
"=",
"re",
".",
"compile",
"(",
"r'^(\\s*def\\s)|(\\s*async\\s+def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)'",
")",
"while",
"lnum",
">",
"0",
":",
"if",
"pat",
".",
"match",
"(",
"lines",
"[",
"lnum",
"]",
")",
":",
"break",
"lnum",
"=",
"lnum",
"-",
"1",
"# pragma: no cover",
"return",
"lines",
",",
"lnum",
"def",
"getfile",
"(",
"func",
")",
":",
"module",
"=",
"inspect",
".",
"getmodule",
"(",
"func",
")",
"return",
"module",
".",
"__file__",
"try",
":",
"return",
"inspect",
".",
"getsource",
"(",
"func",
")",
"except",
"Exception",
":",
"return",
"getsource",
"(",
"func",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
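A minimal usage sketch of `get_func_sourcecode`, assuming the import path mirrors the file path above. For a function defined in a regular file this behaves like `inspect.getsource()`; the linecache-based fallback only matters for dynamically created modules.

```python
# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.helpers import get_func_sourcecode

def my_process(df):
    return df

# Prints the function's source; falls back to reading the file through
# linecache if inspect.getsource() raises.
print(get_func_sourcecode(my_process))
```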
|
test
|
slugify
|
Returns a slugified name (we allow _ to be used)
|
toucan_data_sdk/utils/helpers.py
|
def slugify(name, separator='-'):
"""Returns a slugified name (we allow _ to be used)"""
return _slugify(name, regex_pattern=re.compile('[^-_a-z0-9]+'), separator=separator)
|
def slugify(name, separator='-'):
"""Returns a slugified name (we allow _ to be used)"""
return _slugify(name, regex_pattern=re.compile('[^-_a-z0-9]+'), separator=separator)
|
[
"Returns",
"a",
"slugified",
"name",
"(",
"we",
"allow",
"_",
"to",
"be",
"used",
")"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/helpers.py#L99-L101
|
[
"def",
"slugify",
"(",
"name",
",",
"separator",
"=",
"'-'",
")",
":",
"return",
"_slugify",
"(",
"name",
",",
"regex_pattern",
"=",
"re",
".",
"compile",
"(",
"'[^-_a-z0-9]+'",
")",
",",
"separator",
"=",
"separator",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
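A quick sketch of `slugify`, under the usual import-path assumption; the expected outputs in the comments are inferred from the custom regex pattern (`[^-_a-z0-9]+`) and have not been verified against the underlying `_slugify` library.

```python
# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.helpers import slugify

# Underscores are kept by the custom pattern; any other character
# outside [a-z0-9-] is replaced by the separator.
slugify('My Report 2024')                 # expected: 'my-report-2024'
slugify('my_report 2024', separator='_')  # expected: 'my_report_2024'
```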
|
test
|
resolve_dependencies
|
Given a function name and a mapping of function dependencies,
returns a list of *all* the dependencies for this function.
|
toucan_data_sdk/utils/helpers.py
|
def resolve_dependencies(func_name, dependencies):
""" Given a function name and a mapping of function dependencies,
returns a list of *all* the dependencies for this function. """
def _resolve_deps(func_name, func_deps):
""" Append dependencies recursively to func_deps (accumulator) """
if func_name in func_deps:
return
func_deps.append(func_name)
for dep in dependencies.get(func_name, []):
_resolve_deps(dep, func_deps)
func_deps = []
_resolve_deps(func_name, func_deps)
return sorted(func_deps)
|
def resolve_dependencies(func_name, dependencies):
""" Given a function name and a mapping of function dependencies,
returns a list of *all* the dependencies for this function. """
def _resolve_deps(func_name, func_deps):
""" Append dependencies recursively to func_deps (accumulator) """
if func_name in func_deps:
return
func_deps.append(func_name)
for dep in dependencies.get(func_name, []):
_resolve_deps(dep, func_deps)
func_deps = []
_resolve_deps(func_name, func_deps)
return sorted(func_deps)
|
[
"Given",
"a",
"function",
"name",
"and",
"a",
"mapping",
"of",
"function",
"dependencies",
"returns",
"a",
"list",
"of",
"*",
"all",
"*",
"the",
"dependencies",
"for",
"this",
"function",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/helpers.py#L104-L119
|
[
"def",
"resolve_dependencies",
"(",
"func_name",
",",
"dependencies",
")",
":",
"def",
"_resolve_deps",
"(",
"func_name",
",",
"func_deps",
")",
":",
"\"\"\" Append dependencies recursively to func_deps (accumulator) \"\"\"",
"if",
"func_name",
"in",
"func_deps",
":",
"return",
"func_deps",
".",
"append",
"(",
"func_name",
")",
"for",
"dep",
"in",
"dependencies",
".",
"get",
"(",
"func_name",
",",
"[",
"]",
")",
":",
"_resolve_deps",
"(",
"dep",
",",
"func_deps",
")",
"func_deps",
"=",
"[",
"]",
"_resolve_deps",
"(",
"func_name",
",",
"func_deps",
")",
"return",
"sorted",
"(",
"func_deps",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
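A runnable sketch of `resolve_dependencies`, under the usual import-path assumption. The recursion deduplicates shared dependencies and the result is sorted, including the function itself.

```python
# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.helpers import resolve_dependencies

dependencies = {'c': ['a', 'b'], 'b': ['a'], 'a': []}

# 'c' depends on 'a' and 'b'; 'b' depends on 'a' again, which is only
# collected once.
assert resolve_dependencies('c', dependencies) == ['a', 'b', 'c']
```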
|
test
|
clean_cachedir_old_entries
|
Remove old entries from the cache
|
toucan_data_sdk/utils/helpers.py
|
def clean_cachedir_old_entries(cachedir: StoreBackendBase, func_name: str, limit: int) -> int:
"""Remove old entries from the cache"""
if limit < 1:
raise ValueError("'limit' must be greater or equal to 1")
cache_entries = get_cachedir_entries(cachedir, func_name)
cache_entries = sorted(cache_entries, key=lambda e: e.last_access, reverse=True)
cache_entries_to_remove = cache_entries[limit:]
for entry in cache_entries_to_remove:
shutil.rmtree(entry.path, ignore_errors=True)
return len(cache_entries_to_remove)
|
def clean_cachedir_old_entries(cachedir: StoreBackendBase, func_name: str, limit: int) -> int:
"""Remove old entries from the cache"""
if limit < 1:
raise ValueError("'limit' must be greater or equal to 1")
cache_entries = get_cachedir_entries(cachedir, func_name)
cache_entries = sorted(cache_entries, key=lambda e: e.last_access, reverse=True)
cache_entries_to_remove = cache_entries[limit:]
for entry in cache_entries_to_remove:
shutil.rmtree(entry.path, ignore_errors=True)
return len(cache_entries_to_remove)
|
[
"Remove",
"old",
"entries",
"from",
"the",
"cache"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/helpers.py#L122-L133
|
[
"def",
"clean_cachedir_old_entries",
"(",
"cachedir",
":",
"StoreBackendBase",
",",
"func_name",
":",
"str",
",",
"limit",
":",
"int",
")",
"->",
"int",
":",
"if",
"limit",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"'limit' must be greater or equal to 1\"",
")",
"cache_entries",
"=",
"get_cachedir_entries",
"(",
"cachedir",
",",
"func_name",
")",
"cache_entries",
"=",
"sorted",
"(",
"cache_entries",
",",
"key",
"=",
"lambda",
"e",
":",
"e",
".",
"last_access",
",",
"reverse",
"=",
"True",
")",
"cache_entries_to_remove",
"=",
"cache_entries",
"[",
"limit",
":",
"]",
"for",
"entry",
"in",
"cache_entries_to_remove",
":",
"shutil",
".",
"rmtree",
"(",
"entry",
".",
"path",
",",
"ignore_errors",
"=",
"True",
")",
"return",
"len",
"(",
"cache_entries_to_remove",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
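A hedged sketch of `clean_cachedir_old_entries` driven through a plain `joblib.Memory`; the import path is assumed to mirror the file path above, and `square.store_backend` relies on the same joblib attribute the `cache` decorator above uses (`f.store_backend`).

```python
import joblib

# Assumed import path, mirroring the file path shown above.
from toucan_data_sdk.utils.helpers import clean_cachedir_old_entries

memory = joblib.Memory(location='/tmp/demo_cachedir', verbose=0)

@memory.cache
def square(x):
    return x ** 2

for x in range(5):
    square(x)  # each distinct argument creates one cache entry

# Keep only the 2 most recently accessed entries for 'square'.
removed = clean_cachedir_old_entries(square.store_backend, 'square', limit=2)
print(f'removed {removed} entries')
```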
|
test
|
roll_up
|
Creates aggregates following a given hierarchy
---
### Parameters
*mandatory :*
- `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level).
- `groupby_vars` (*list of str*): name of the columns with value to aggregate.
- `extra_groupby_cols` (*list of str*) optional: other columns used to group in each level.
*optional :*
- `var_name` (*str*) : name of the result variable column. By default, `“type”`.
- `value_name` (*str*): name of the result value column. By default, `“value”`.
- `agg_func` (*str*): name of the aggregation operation. By default, `“sum”`.
- `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output.
---
### Example
**Input**
| Region | City | Population |
|:---------:|:--------:|:-----------:|
| Idf | Panam | 200 |
| Idf | Antony | 50 |
| Nord | Lille | 20 |
```cson
roll_up:
levels: ["Region", "City"]
groupby_vars: "Population"
```
**Output**
| Region | City | Population | value | type |
|:---------:|:--------:|:-----------:|:--------:|:------:|
| Idf | Panam | 200 | Panam | City |
| Idf | Antony | 50 | Antony | City |
| Nord | Lille | 20 | Lille | City |
| Idf | Nan | 250 | Idf | Region |
| Nord | Nan | 20 | Nord | Region |
|
toucan_data_sdk/utils/generic/roll_up.py
|
def roll_up(
df,
levels: List[str],
groupby_vars: List[str],
extra_groupby_cols: List[str] = None,
var_name: str = 'type',
value_name: str = 'value',
agg_func: str = 'sum',
drop_levels: List[str] = None
):
"""
Creates aggregates following a given hierarchy
---
### Parameters
*mandatory :*
- `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level).
- `groupby_vars` (*list of str*): name of the columns with value to aggregate.
*optional :*
- `extra_groupby_cols` (*list of str*): other columns used to group in each level.
- `var_name` (*str*) : name of the result variable column. By default, `“type”`.
- `value_name` (*str*): name of the result value column. By default, `“value”`.
- `agg_func` (*str*): name of the aggregation operation. By default, `“sum”`.
- `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output.
---
### Example
**Input**
| Region | City | Population |
|:---------:|:--------:|:-----------:|
| Idf | Panam| 200 |
| Idf | Antony | 50 |
| Nord | Lille | 20 |
```cson
roll_up:
levels: ["Region", "City"]
groupby_vars: "Population"
```
**Output**
| Region | City | Population | value | type |
|:---------:|:--------:|:-----------:|:--------:|:------:|
| Idf | Panam| 200 | Panam | City |
| Idf | Antony | 50 | Antony | City |
| Nord | Lille | 20 | Lille | City |
| Idf | Nan | 250 | Idf | Region |
| Nord | Nan | 20 | Nord | Region |
"""
dfs = list()
groupby_cols_cpy = list(levels)
levels_cpy = list(levels)
levels_cpy.reverse()
extra_groupby_cols = extra_groupby_cols or []
drop_levels = drop_levels or []
previous_level = None
for top_level in levels_cpy:
# Aggregation
gb_df = getattr(
df.groupby(groupby_cols_cpy + extra_groupby_cols)[groupby_vars],
agg_func)().reset_index()
# Melt-like columns
gb_df[var_name] = top_level
gb_df[value_name] = gb_df[top_level]
dfs.append(gb_df)
if previous_level in drop_levels:
del dfs[-2]
previous_level = top_level
# Remove one level each time in the groupby: lowest level column needs
# a groupby with every levels, the next level needs every one except
# the lowest, etc. until the top level column that needs only itself
# inside the groupby.
groupby_cols_cpy.pop()
return pd.concat(dfs, sort=False).reset_index()
|
def roll_up(
df,
levels: List[str],
groupby_vars: List[str],
extra_groupby_cols: List[str] = None,
var_name: str = 'type',
value_name: str = 'value',
agg_func: str = 'sum',
drop_levels: List[str] = None
):
"""
Creates aggregates following a given hierarchy
---
### Parameters
*mandatory :*
- `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level).
- `groupby_vars` (*list of str*): name of the columns with value to aggregate.
*optional :*
- `extra_groupby_cols` (*list of str*): other columns used to group in each level.
- `var_name` (*str*) : name of the result variable column. By default, `“type”`.
- `value_name` (*str*): name of the result value column. By default, `“value”`.
- `agg_func` (*str*): name of the aggregation operation. By default, `“sum”`.
- `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output.
---
### Example
**Input**
| Region | City | Population |
|:---------:|:--------:|:-----------:|
| Idf | Panam| 200 |
| Idf | Antony | 50 |
| Nord | Lille | 20 |
```cson
roll_up:
levels: ["Region", "City"]
groupby_vars: "Population"
```
**Output**
| Region | City | Population | value | type |
|:---------:|:--------:|:-----------:|:--------:|:------:|
| Idf | Panam| 200 | Panam | City |
| Idf | Antony | 50 | Antony | City |
| Nord | Lille | 20 | Lille | City |
| Idf | Nan | 250 | Idf | Region |
| Nord | Nan | 20 | Nord | Region |
"""
dfs = list()
groupby_cols_cpy = list(levels)
levels_cpy = list(levels)
levels_cpy.reverse()
extra_groupby_cols = extra_groupby_cols or []
drop_levels = drop_levels or []
previous_level = None
for top_level in levels_cpy:
# Aggregation
gb_df = getattr(
df.groupby(groupby_cols_cpy + extra_groupby_cols)[groupby_vars],
agg_func)().reset_index()
# Melt-like columns
gb_df[var_name] = top_level
gb_df[value_name] = gb_df[top_level]
dfs.append(gb_df)
if previous_level in drop_levels:
del dfs[-2]
previous_level = top_level
# Remove one level each time in the groupby: lowest level column needs
# a groupby with every levels, the next level needs every one except
# the lowest, etc. until the top level column that needs only itself
# inside the groupby.
groupby_cols_cpy.pop()
return pd.concat(dfs, sort=False).reset_index()
|
[
"Creates",
"aggregates",
"following",
"a",
"given",
"hierarchy"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/roll_up.py#L5-L88
|
[
"def",
"roll_up",
"(",
"df",
",",
"levels",
":",
"List",
"[",
"str",
"]",
",",
"groupby_vars",
":",
"List",
"[",
"str",
"]",
",",
"extra_groupby_cols",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"var_name",
":",
"str",
"=",
"'type'",
",",
"value_name",
":",
"str",
"=",
"'value'",
",",
"agg_func",
":",
"str",
"=",
"'sum'",
",",
"drop_levels",
":",
"List",
"[",
"str",
"]",
"=",
"None",
")",
":",
"dfs",
"=",
"list",
"(",
")",
"groupby_cols_cpy",
"=",
"list",
"(",
"levels",
")",
"levels_cpy",
"=",
"list",
"(",
"levels",
")",
"levels_cpy",
".",
"reverse",
"(",
")",
"extra_groupby_cols",
"=",
"extra_groupby_cols",
"or",
"[",
"]",
"drop_levels",
"=",
"drop_levels",
"or",
"[",
"]",
"previous_level",
"=",
"None",
"for",
"top_level",
"in",
"levels_cpy",
":",
"# Aggregation",
"gb_df",
"=",
"getattr",
"(",
"df",
".",
"groupby",
"(",
"groupby_cols_cpy",
"+",
"extra_groupby_cols",
")",
"[",
"groupby_vars",
"]",
",",
"agg_func",
")",
"(",
")",
".",
"reset_index",
"(",
")",
"# Melt-like columns",
"gb_df",
"[",
"var_name",
"]",
"=",
"top_level",
"gb_df",
"[",
"value_name",
"]",
"=",
"gb_df",
"[",
"top_level",
"]",
"dfs",
".",
"append",
"(",
"gb_df",
")",
"if",
"previous_level",
"in",
"drop_levels",
":",
"del",
"dfs",
"[",
"-",
"2",
"]",
"previous_level",
"=",
"top_level",
"# Remove one level each time in the groupby: lowest level column needs",
"# a groupby with every levels, the next level needs every one except",
"# the lowest, etc. until the top level column that needs only itself",
"# inside the groupby.",
"groupby_cols_cpy",
".",
"pop",
"(",
")",
"return",
"pd",
".",
"concat",
"(",
"dfs",
",",
"sort",
"=",
"False",
")",
".",
"reset_index",
"(",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
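A minimal usage sketch reproducing the docstring example above; the import path is inferred from the record's `path` field and is an assumption.
```python
# Sketch only: assumes roll_up is importable from the module path shown above.
import pandas as pd
from toucan_data_sdk.utils.generic.roll_up import roll_up

df = pd.DataFrame({
    'Region': ['Idf', 'Idf', 'Nord'],
    'City': ['Panam', 'Antony', 'Lille'],
    'Population': [200, 50, 20],
})

# City-level rows are kept as-is; Region-level rows are summed aggregates.
out = roll_up(df, levels=['Region', 'City'], groupby_vars=['Population'])
print(out[['Region', 'City', 'Population', 'value', 'type']])
```
Each loop iteration drops the lowest remaining level from the groupby, which is why the Region rows have `City` set to NaN in the output table above.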
|
test
|
argmax
|
Keep the row of the data corresponding to the maximal value in a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column containing the value you want to keep the maximum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmax by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmax:
column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2016 | 450 |
|
toucan_data_sdk/utils/postprocess/argmax.py
|
def argmax(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the maximal value in a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column containing the value you want to keep the maximum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmax by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmax:
column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2016 | 450 |
"""
if groups is None:
df = df[df[column] == df[column].max()].reset_index(drop=True)
else:
group_max = df.groupby(groups)[column].transform('max')
df = (df
.loc[df[column] == group_max, :]
.drop_duplicates()
.reset_index(drop=True)
)
return df
|
def argmax(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the maximal value in a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column containing the value you want to keep the maximum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmax by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmax:
column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2016 | 450 |
"""
if groups is None:
df = df[df[column] == df[column].max()].reset_index(drop=True)
else:
group_max = df.groupby(groups)[column].transform('max')
df = (df
.loc[df[column] == group_max, :]
.drop_duplicates()
.reset_index(drop=True)
)
return df
|
[
"Keep",
"the",
"row",
"of",
"the",
"data",
"corresponding",
"to",
"the",
"maximal",
"value",
"in",
"a",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/argmax.py#L4-L51
|
[
"def",
"argmax",
"(",
"df",
",",
"column",
":",
"str",
",",
"groups",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
":",
"if",
"groups",
"is",
"None",
":",
"df",
"=",
"df",
"[",
"df",
"[",
"column",
"]",
"==",
"df",
"[",
"column",
"]",
".",
"max",
"(",
")",
"]",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"else",
":",
"group_max",
"=",
"df",
".",
"groupby",
"(",
"groups",
")",
"[",
"column",
"]",
".",
"transform",
"(",
"'max'",
")",
"df",
"=",
"(",
"df",
".",
"loc",
"[",
"df",
"[",
"column",
"]",
"==",
"group_max",
",",
":",
"]",
".",
"drop_duplicates",
"(",
")",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
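A short usage sketch for `argmax`, with the import path inferred from the record's `path` field (an assumption):
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.argmax import argmax  # assumed path

df = pd.DataFrame({
    'variable': ['toto'] * 3,
    'wave': ['wave 1'] * 3,
    'year': [2014, 2015, 2016],
    'value': [300, 250, 450],
})

print(argmax(df, column='year'))                    # keeps only the 2016 row
print(argmax(df, column='value', groups=['wave']))  # max value per wave
```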
|
test
|
argmin
|
Keep the row of the data corresponding to the minimal value in a column
---
### Parameters
*mandatory :*
- `column` (str): name of the column containing the value you want to keep the minimum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmin by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmin:
column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2015 | 250 |
|
toucan_data_sdk/utils/postprocess/argmax.py
|
def argmin(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the minimal value in a column
---
### Parameters
*mandatory :*
- `column` (str): name of the column containing the value you want to keep the minimum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmin by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmin:
column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2015 | 250 |
"""
if groups is None:
df = df[df[column] == df[column].min()].reset_index(drop=True)
else:
group_min = df.groupby(groups)[column].transform('min')
df = (df
.loc[df[column] == group_min, :]
.drop_duplicates()
.reset_index(drop=True)
)
return df
|
def argmin(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the minimal value in a column
---
### Parameters
*mandatory :*
- `column` (str): name of the column containing the value you want to keep the minimum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmin by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmin:
column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2015 | 250 |
"""
if groups is None:
df = df[df[column] == df[column].min()].reset_index(drop=True)
else:
group_min = df.groupby(groups)[column].transform('min')
df = (df
.loc[df[column] == group_min, :]
.drop_duplicates()
.reset_index(drop=True)
)
return df
|
[
"Keep",
"the",
"row",
"of",
"the",
"data",
"corresponding",
"to",
"the",
"minimal",
"value",
"in",
"a",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/argmax.py#L54-L101
|
[
"def",
"argmin",
"(",
"df",
",",
"column",
":",
"str",
",",
"groups",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
":",
"if",
"groups",
"is",
"None",
":",
"df",
"=",
"df",
"[",
"df",
"[",
"column",
"]",
"==",
"df",
"[",
"column",
"]",
".",
"min",
"(",
")",
"]",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"else",
":",
"group_min",
"=",
"df",
".",
"groupby",
"(",
"groups",
")",
"[",
"column",
"]",
".",
"transform",
"(",
"'min'",
")",
"df",
"=",
"(",
"df",
".",
"loc",
"[",
"df",
"[",
"column",
"]",
"==",
"group_min",
",",
":",
"]",
".",
"drop_duplicates",
"(",
")",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
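The companion sketch for `argmin`, under the same assumptions as the `argmax` example above (both functions live in `argmax.py` per the record's `path` field):
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.argmax import argmin  # assumed path

df = pd.DataFrame({'year': [2014, 2015, 2016], 'value': [300, 250, 450]})
print(argmin(df, column='value'))   # keeps only the 2015 row (value == 250)
```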
|
test
|
fillna
|
Can fill NaN values from a column with a given value or a column
---
### Parameters
- `column` (*str*): name of column you want to fill
- `value`: NaN will be replaced by this value
- `column_value`: NaN will be replaced by value from this column
*NOTE*: You must set either the 'value' parameter or the 'column_value' parameter
---
### Example
**Input**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | |
| toto | wave 1 | 2016 | 450 |
```cson
fillna:
column: 'my_value'
value: 0
```
**Output**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 0 |
| toto | wave 1 | 2016 | 450 |
|
toucan_data_sdk/utils/postprocess/fillna.py
|
def fillna(df, column: str, value=None, column_value=None):
"""
Can fill NaN values from a column with a given value or a column
---
### Parameters
- `column` (*str*): name of column you want to fill
- `value`: NaN will be replaced by this value
- `column_value`: NaN will be replaced by value from this column
*NOTE*: You must set either the 'value' parameter or the 'column_value' parameter
---
### Example
**Input**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | |
| toto | wave 1 | 2016 | 450 |
```cson
fillna:
column: 'my_value'
value: 0
```
**Output**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 0 |
| toto | wave 1 | 2016 | 450 |
"""
if column not in df.columns:
df[column] = nan
if value is not None and column_value is not None:
raise ValueError('You cannot set both the parameters value and column_value')
if value is not None:
df[column] = df[column].fillna(value)
if column_value is not None:
if column_value not in df.columns:
raise ValueError(f'"{column_value}" is not a valid column name')
df[column] = df[column].fillna(df[column_value])
return df
|
def fillna(df, column: str, value=None, column_value=None):
"""
Can fill NaN values from a column with a given value or a column
---
### Parameters
- `column` (*str*): name of column you want to fill
- `value`: NaN will be replaced by this value
- `column_value`: NaN will be replaced by value from this column
*NOTE*: You must set either the 'value' parameter or the 'column_value' parameter
---
### Example
**Input**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | |
| toto | wave 1 | 2016 | 450 |
```cson
fillna:
column: 'my_value'
value: 0
```
**Output**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 0 |
| toto | wave 1 | 2016 | 450 |
"""
if column not in df.columns:
df[column] = nan
if value is not None and column_value is not None:
raise ValueError('You cannot set both the parameters value and column_value')
if value is not None:
df[column] = df[column].fillna(value)
if column_value is not None:
if column_value not in df.columns:
raise ValueError(f'"{column_value}" is not a valid column name')
df[column] = df[column].fillna(df[column_value])
return df
|
[
"Can",
"fill",
"NaN",
"values",
"from",
"a",
"column",
"with",
"a",
"given",
"value",
"or",
"a",
"column"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/fillna.py#L4-L58
|
[
"def",
"fillna",
"(",
"df",
",",
"column",
":",
"str",
",",
"value",
"=",
"None",
",",
"column_value",
"=",
"None",
")",
":",
"if",
"column",
"not",
"in",
"df",
".",
"columns",
":",
"df",
"[",
"column",
"]",
"=",
"nan",
"if",
"value",
"is",
"not",
"None",
"and",
"column_value",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'You cannot set both the parameters value and column_value'",
")",
"if",
"value",
"is",
"not",
"None",
":",
"df",
"[",
"column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"fillna",
"(",
"value",
")",
"if",
"column_value",
"is",
"not",
"None",
":",
"if",
"column_value",
"not",
"in",
"df",
".",
"columns",
":",
"raise",
"ValueError",
"(",
"f'\"{column_value}\" is not a valid column name'",
")",
"df",
"[",
"column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"fillna",
"(",
"df",
"[",
"column_value",
"]",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
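A usage sketch for `fillna` (import path inferred from the record's `path` field; an assumption):
```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.fillna import fillna  # assumed path

df = pd.DataFrame({'year': [2014, 2015, 2016],
                   'my_value': [300, None, 450]})

df = fillna(df, column='my_value', value=0)   # NaN -> 0
# Alternatively, borrow the replacement from another column:
# df = fillna(df, column='my_value', column_value='year')
print(df)
```
Passing both `value` and `column_value` raises a ValueError, as the code above shows.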
|
test
|
date_requester_generator
|
From a dataset containing dates in a column, return a dataset
with at least 3 columns :
- "DATE" : Label of date
- "DATETIME" : Date in datetime dtype
- "GRANULARITY" : Granularity of date
---
### Parameters
*mandatory :*
- `date_column` (*str*): name of column containing the date in the dataframe
- `frequency` (*str*): see [pandas doc](
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)
*optional :*
- `date_column_format` (*str*): format of the date in date_column
- `format` (*str*): format of the date (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
By default, the format is set to `'%Y-%m-%d'`
**WARNING**: only use if `granularities` is None.
- `granularities` (*dict*):
- key (*str*): name of the granularity
- value (*str*): Format of the granularity e.g. '%d/%m/%Y' (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `others_format` (*dict*) : Add new columns for each key
- key (*str*) : name of the column
- value (*str*): format of the granularity e.g. '%d/%m/%Y' (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `times_delta` (*dict*) : Add new columns for each key
- key (*str*) : name of the column
- value (*str*): time delta (e.g. '+1 day', '+3 day', '-4 month')
---
### Example
**Input**
date | kpi
:---:|:-----:
2018-01-01 | 1
2018-01-05 | 2
2018-01-04 | 3
2018-01-03 | 4
2018-01-02 | 5
```cson
date_requester_generator:
date_column: 'date'
frequency: 'D'
granularities:
'day': '%d/%m/%Y'
'Semaine': '%W'
others_format:
'year': '%Y'
```
**Output**
DATE | DATETIME | GRANULARITY | year
:---------:|:----------:|:-----------:|:---:
01/01/2018 | 2018-01-01 | day | 2018
02/01/2018 | 2018-01-02 | day | 2018
03/01/2018 | 2018-01-03 | day | 2018
04/01/2018 | 2018-01-04 | day | 2018
05/01/2018 | 2018-01-05 | day | 2018
01 | 2018-01-01 | Semaine | 2018
|
toucan_data_sdk/utils/generic/date_requester.py
|
def date_requester_generator(
df: pd.DataFrame,
date_column: str,
frequency: str,
date_column_format: str = None,
format: str = '%Y-%m-%d',
granularities: Dict[str, str] = None,
others_format: Dict[str, str] = None,
times_delta: Dict[str, str] = None
) -> pd.DataFrame:
"""
From a dataset containing dates in a column, return a dataset
with at least 3 columns :
- "DATE" : Label of date
- "DATETIME" : Date in datetime dtype
- "GRANULARITY" : Granularity of date
---
### Parameters
*mandatory :*
- `date_column` (*str*): name of column containing the date in the dataframe
- `frequency` (*str*): see [pandas doc](
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)
*optional :*
- `date_column_format` (*str*): format of the date in date_column
- `format` (*str*): format of the date (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
By default, the format is set to `'%Y-%m-%d'`
**WARNING**: only use if `granularities` is None.
- `granularities` (*dict*):
- key (*str*): name of the granularity
- value (*str*): Format of the granularity e.g. '%d/%m/%Y' (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `others_format` (*dict*) : Add new columns for each key
- key (*str*) : name of the column
- value (*str*): format of the granularity e.g. '%d/%m/%Y' (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `times_delta` (*dict*) : Add new columns for each key
- key (*str*) : name of the column
- value (*str*): time delta (e.g. '+1 day', '+3 day', '-4 month')
---
### Example
**Input**
date | kpi
:---:|:-----:
2018-01-01 | 1
2018-01-05 | 2
2018-01-04 | 3
2018-01-03 | 4
2018-01-02 | 5
```cson
date_requester_generator:
date_column: 'date'
frequency: 'D'
granularities:
'day': '%d/%m/%Y'
'Semaine': '%W'
others_format:
'year': '%Y'
```
**Output**
DATE | DATETIME | GRANULARITY | year
:---------:|:----------:|:-----------:|:---:
01/01/2018 | 2018-01-01 | day | 2018
02/01/2018 | 2018-01-02 | day | 2018
03/01/2018 | 2018-01-03 | day | 2018
04/01/2018 | 2018-01-04 | day | 2018
05/01/2018 | 2018-01-05 | day | 2018
01 | 2018-01-01 | Semaine | 2018
"""
start_date = pd.to_datetime(df[date_column], format=date_column_format).min()
end_date = pd.to_datetime(df[date_column], format=date_column_format).max()
granularities = granularities or {'date': format}
others_format = others_format or {}
times_delta = times_delta or {}
# Base DataFrame
columns_list = ['DATE', 'DATETIME', 'GRANULARITY', *others_format, *times_delta]
result_df = {col_name: [] for col_name in columns_list}
# Generate the range
date_range = pd.date_range(start=start_date, end=end_date, freq=frequency)
for granularity_name, granularity_format in granularities.items():
date_range_label = date_range.strftime(granularity_format)
a = list(set(date_range_label))
index_unique = list(set([a.index(x) for x in date_range_label]))
date_range_datetime = date_range[index_unique]
date_range_label = date_range_label.unique()
result_df['DATE'] += list(date_range_label)
result_df['DATETIME'] += list(date_range_datetime)
result_df['GRANULARITY'] += [granularity_name] * len(date_range_label)
for col_name, other_format in others_format.items():
result_df[col_name] += list(date_range_datetime.strftime(other_format))
for col_name, time_delta in times_delta.items():
result_df[col_name] += list((date_range_datetime + pd.Timedelta(time_delta))
.strftime(granularity_format))
return pd.DataFrame(result_df)
|
def date_requester_generator(
df: pd.DataFrame,
date_column: str,
frequency: str,
date_column_format: str = None,
format: str = '%Y-%m-%d',
granularities: Dict[str, str] = None,
others_format: Dict[str, str] = None,
times_delta: Dict[str, str] = None
) -> pd.DataFrame:
"""
From a dataset containing dates in a column, return a dataset
with at least 3 columns :
- "DATE" : Label of date
- "DATETIME" : Date in datetime dtype
- "GRANULARITY" : Granularity of date
---
### Parameters
*mandatory :*
- `date_column` (*str*): name of column containing the date in the dataframe
- `frequency` (*str*): see [pandas doc](
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)
*optional :*
- `date_column_format` (*str*): format of the date in date_column
- `format` (*str*): format of the date (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
By default, the format is set to `'%Y-%m-%d'`
**WARNING**: only use if `granularities` is None.
- `granularities` (*dict*):
- key (*str*): name of the granularity
- value (*str*): Format of the granularity e.g. '%d/%m/%Y' (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `others_format` (*dict*) : Add new columns for each key
- key (*str*) : name of the column
- value (*str*): format of the granularity e.g. '%d/%m/%Y' (see [pandas doc](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `times_delta` (*dict*) : Add new columns for each key
- key (*str*) : name of the column
- value (*str*): time delta (e.g. '+1 day', '+3 day', '-4 month')
---
### Example
**Input**
date | kpi
:---:|:-----:
2018-01-01 | 1
2018-01-05 | 2
2018-01-04 | 3
2018-01-03 | 4
2018-01-02 | 5
```cson
date_requester_generator:
date_column: 'date'
frequency: 'D'
granularities:
'day': '%d/%m/%Y'
'Semaine': '%W'
others_format:
'year': '%Y'
```
**Output**
DATE | DATETIME | GRANULARITY | year
:---------:|:----------:|:-----------:|:---:
01/01/2018 | 2018-01-01 | day | 2018
02/01/2018 | 2018-01-02 | day | 2018
03/01/2018 | 2018-01-03 | day | 2018
04/01/2018 | 2018-01-04 | day | 2018
05/01/2018 | 2018-01-05 | day | 2018
01 | 2018-01-01 | Semaine | 2018
"""
start_date = pd.to_datetime(df[date_column], format=date_column_format).min()
end_date = pd.to_datetime(df[date_column], format=date_column_format).max()
granularities = granularities or {'date': format}
others_format = others_format or {}
times_delta = times_delta or {}
# Base DataFrame
columns_list = ['DATE', 'DATETIME', 'GRANULARITY', *others_format, *times_delta]
result_df = {col_name: [] for col_name in columns_list}
# Generate the range
date_range = pd.date_range(start=start_date, end=end_date, freq=frequency)
for granularity_name, granularity_format in granularities.items():
date_range_label = date_range.strftime(granularity_format)
a = list(set(date_range_label))
index_unique = list(set([a.index(x) for x in date_range_label]))
date_range_datetime = date_range[index_unique]
date_range_label = date_range_label.unique()
result_df['DATE'] += list(date_range_label)
result_df['DATETIME'] += list(date_range_datetime)
result_df['GRANULARITY'] += [granularity_name] * len(date_range_label)
for col_name, other_format in others_format.items():
result_df[col_name] += list(date_range_datetime.strftime(other_format))
for col_name, time_delta in times_delta.items():
result_df[col_name] += list((date_range_datetime + pd.Timedelta(time_delta))
.strftime(granularity_format))
return pd.DataFrame(result_df)
|
[
"From",
"a",
"dataset",
"containing",
"dates",
"in",
"a",
"column",
"return",
"a",
"dataset",
"with",
"at",
"least",
"3",
"columns",
":",
"-",
"DATE",
":",
"Label",
"of",
"date",
"-",
"DATETIME",
":",
"Date",
"in",
"datetime",
"dtype",
"-",
"GRANULARITY",
":",
"Granularity",
"of",
"date"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/date_requester.py#L5-L118
|
[
"def",
"date_requester_generator",
"(",
"df",
":",
"pd",
".",
"DataFrame",
",",
"date_column",
":",
"str",
",",
"frequency",
":",
"str",
",",
"date_column_format",
":",
"str",
"=",
"None",
",",
"format",
":",
"str",
"=",
"'%Y-%m-%d'",
",",
"granularities",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"None",
",",
"others_format",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"None",
",",
"times_delta",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"None",
")",
"->",
"pd",
".",
"DataFrame",
":",
"start_date",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"date_column",
"]",
",",
"format",
"=",
"date_column_format",
")",
".",
"min",
"(",
")",
"end_date",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"date_column",
"]",
",",
"format",
"=",
"date_column_format",
")",
".",
"max",
"(",
")",
"granularities",
"=",
"granularities",
"or",
"{",
"'date'",
":",
"format",
"}",
"others_format",
"=",
"others_format",
"or",
"{",
"}",
"times_delta",
"=",
"times_delta",
"or",
"{",
"}",
"# Base DataFrame",
"columns_list",
"=",
"[",
"'DATE'",
",",
"'DATETIME'",
",",
"'GRANULARITY'",
",",
"*",
"others_format",
",",
"*",
"times_delta",
"]",
"result_df",
"=",
"{",
"col_name",
":",
"[",
"]",
"for",
"col_name",
"in",
"columns_list",
"}",
"# Generate the range",
"date_range",
"=",
"pd",
".",
"date_range",
"(",
"start",
"=",
"start_date",
",",
"end",
"=",
"end_date",
",",
"freq",
"=",
"frequency",
")",
"for",
"granularity_name",
",",
"granularity_format",
"in",
"granularities",
".",
"items",
"(",
")",
":",
"date_range_label",
"=",
"date_range",
".",
"strftime",
"(",
"granularity_format",
")",
"a",
"=",
"list",
"(",
"set",
"(",
"date_range_label",
")",
")",
"index_unique",
"=",
"list",
"(",
"set",
"(",
"[",
"a",
".",
"index",
"(",
"x",
")",
"for",
"x",
"in",
"date_range_label",
"]",
")",
")",
"date_range_datetime",
"=",
"date_range",
"[",
"index_unique",
"]",
"date_range_label",
"=",
"date_range_label",
".",
"unique",
"(",
")",
"result_df",
"[",
"'DATE'",
"]",
"+=",
"list",
"(",
"date_range_label",
")",
"result_df",
"[",
"'DATETIME'",
"]",
"+=",
"list",
"(",
"date_range_datetime",
")",
"result_df",
"[",
"'GRANULARITY'",
"]",
"+=",
"[",
"granularity_name",
"]",
"*",
"len",
"(",
"date_range_label",
")",
"for",
"col_name",
",",
"other_format",
"in",
"others_format",
".",
"items",
"(",
")",
":",
"result_df",
"[",
"col_name",
"]",
"+=",
"list",
"(",
"date_range_datetime",
".",
"strftime",
"(",
"other_format",
")",
")",
"for",
"col_name",
",",
"time_delta",
"in",
"times_delta",
".",
"items",
"(",
")",
":",
"result_df",
"[",
"col_name",
"]",
"+=",
"list",
"(",
"(",
"date_range_datetime",
"+",
"pd",
".",
"Timedelta",
"(",
"time_delta",
")",
")",
".",
"strftime",
"(",
"granularity_format",
")",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"result_df",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
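A usage sketch mirroring the docstring example (import path inferred from the record's `path` field; an assumption):
```python
import pandas as pd
from toucan_data_sdk.utils.generic.date_requester import date_requester_generator

df = pd.DataFrame({'date': ['2018-01-01', '2018-01-05', '2018-01-03'],
                   'kpi': [1, 2, 4]})

# One block of rows per granularity, plus an extra 'year' column.
out = date_requester_generator(
    df, date_column='date', frequency='D',
    granularities={'day': '%d/%m/%Y', 'week': '%W'},
    others_format={'year': '%Y'},
)
print(out)
```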
|
test
|
_norm_date
|
normalize symbolic date values (e.g. 'TODAY')
Convert a symbolic value into a valid date.
Currently known symbolic values are 'TODAY', 'YESTERDAY' and 'TOMORROW'.
NOTE: This function will return `date` (not `datetime`) instances.
Parameters:
`datestr`: the date to parse, formatted as `date_fmt`
`date_fmt`: expected output date format
Returns:
The interpreted date as a `datetime.date` object.
If `datestr` doesn't match any of the known symbolic names, it just parses it.
|
toucan_data_sdk/utils/postprocess/filter_by_date.py
|
def _norm_date(datestr: str, date_fmt: str) -> date:
"""normalize symbolic date values (e.g. 'TODAY')
Convert a symbolic value into a valid date.
Currently known symbolic values are 'TODAY', 'YESTERDAY' and 'TOMORROW'.
NOTE: This function will return `date` (not `datetime`) instances.
Parameters:
`datestr`: the date to parse, formatted as `date_fmt`
`date_fmt`: expected output date format
Returns:
The interpreted date as a `datetime.date` object.
If `datestr` doesn't match any of the known symbolic names, it just parses it.
"""
try:
days = {'TODAY': 0, 'YESTERDAY': -1, 'TOMORROW': 1}[datestr.upper()]
return date.today() + pd.Timedelta(days=days)
except KeyError:
return datetime.strptime(datestr, date_fmt).date()
|
def _norm_date(datestr: str, date_fmt: str) -> date:
"""normalize symbolic date values (e.g. 'TODAY')
Convert a symbolic value into a valid date.
Currently known symbolic values are 'TODAY', 'YESTERDAY' and 'TOMORROW'.
NOTE: This function will return `date` (not `datetime`) instances.
Parameters:
`datestr`: the date to parse, formatted as `date_fmt`
`date_fmt`: expected output date format
Returns:
The interpreted date as a `datetime.date` object.
If `datestr` doesn't match any of the known symbolic names, it just parses it.
"""
try:
days = {'TODAY': 0, 'YESTERDAY': -1, 'TOMORROW': 1}[datestr.upper()]
return date.today() + pd.Timedelta(days=days)
except KeyError:
return datetime.strptime(datestr, date_fmt).date()
|
[
"normalize",
"symbolic",
"date",
"values",
"(",
"e",
".",
"g",
".",
"TODAY",
")"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L13-L34
|
[
"def",
"_norm_date",
"(",
"datestr",
":",
"str",
",",
"date_fmt",
":",
"str",
")",
"->",
"date",
":",
"try",
":",
"days",
"=",
"{",
"'TODAY'",
":",
"0",
",",
"'YESTERDAY'",
":",
"-",
"1",
",",
"'TOMORROW'",
":",
"1",
"}",
"[",
"datestr",
".",
"upper",
"(",
")",
"]",
"return",
"date",
".",
"today",
"(",
")",
"+",
"pd",
".",
"Timedelta",
"(",
"days",
"=",
"days",
")",
"except",
"KeyError",
":",
"return",
"datetime",
".",
"strptime",
"(",
"datestr",
",",
"date_fmt",
")",
".",
"date",
"(",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
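`_norm_date` is a private helper, but a quick sketch clarifies its two code paths (symbolic lookup vs. `strptime`); the import path is inferred from the record's `path` field:
```python
from toucan_data_sdk.utils.postprocess.filter_by_date import _norm_date  # private helper

print(_norm_date('2018-01-02', '%Y-%m-%d'))  # strptime path -> datetime.date(2018, 1, 2)
print(_norm_date('tomorrow', '%Y-%m-%d'))    # symbolic path, case-insensitive: today + 1 day
```
Note that the symbolic path adds a `pandas.Timedelta` to a `date`, which appears to yield a pandas `Timestamp` (a `datetime` subclass) rather than the plain `date` the NOTE promises.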
|
test
|
add_offset
|
add a human readable offset to `dateobj` and return corresponding date.
rely on `pandas.Timedelta` and add the following extra shortcuts:
- "w", "week" and "weeks" for a week (i.e. 7 days)
- "month", "months" for a month (i.e. no day computation, just increment the month)
- "y", "year", "years" for a year (i.e. no day computation, just increment the year)
|
toucan_data_sdk/utils/postprocess/filter_by_date.py
|
def add_offset(dateobj, hr_offset: str, sign: str):
"""add a human readable offset to `dateobj` and return corresponding date.
rely on `pandas.Timedelta` and add the following extra shortcuts:
- "w", "week" and "weeks" for a week (i.e. 7 days)
- "month", "months" for a month (i.e. no day computation, just increment the month)
- "y", "year", "years" for a year (i.e. no day computation, just increment the year)
"""
sign_coeff = 1 if sign == '+' else -1
try:
return dateobj + sign_coeff * pd.Timedelta(hr_offset)
except ValueError:
# pd.Timedelta could not parse the offset, let's try harder
match = TIMEDELTA_RGX.match(hr_offset)
if match is not None:
groups = match.groupdict()
unit = groups['unit'].lower()[0]
num = sign_coeff * int(groups['num'])
# is it a week ?
if unit == 'w':
return dateobj + num * timedelta(weeks=1)
# or a month ?
if unit == 'm':
return add_months(dateobj, num)
# or a year ?
if unit == 'y':
return add_years(dateobj, num)
# we did what we could, just re-raise the original exception
raise
|
def add_offset(dateobj, hr_offset: str, sign: str):
"""add a human readable offset to `dateobj` and return corresponding date.
rely on `pandas.Timedelta` and add the following extra shortcuts:
- "w", "week" and "weeks" for a week (i.e. 7 days)
- "month", "months" for a month (i.e. no day computation, just increment the month)
- "y", "year", "years" for a year (i.e. no day computation, just increment the year)
"""
sign_coeff = 1 if sign == '+' else -1
try:
return dateobj + sign_coeff * pd.Timedelta(hr_offset)
except ValueError:
# pd.Timedelta could not parse the offset, let's try harder
match = TIMEDELTA_RGX.match(hr_offset)
if match is not None:
groups = match.groupdict()
unit = groups['unit'].lower()[0]
num = sign_coeff * int(groups['num'])
# is it a week ?
if unit == 'w':
return dateobj + num * timedelta(weeks=1)
# or a month ?
if unit == 'm':
return add_months(dateobj, num)
# or a year ?
if unit == 'y':
return add_years(dateobj, num)
# we did what we could, just re-raise the original exception
raise
|
[
"add",
"a",
"human",
"readable",
"offset",
"to",
"dateobj",
"and",
"return",
"corresponding",
"date",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L37-L65
|
[
"def",
"add_offset",
"(",
"dateobj",
",",
"hr_offset",
":",
"str",
",",
"sign",
":",
"str",
")",
":",
"sign_coeff",
"=",
"1",
"if",
"sign",
"==",
"'+'",
"else",
"-",
"1",
"try",
":",
"return",
"dateobj",
"+",
"sign_coeff",
"*",
"pd",
".",
"Timedelta",
"(",
"hr_offset",
")",
"except",
"ValueError",
":",
"# pd.Timedelta could not parse the offset, let's try harder",
"match",
"=",
"TIMEDELTA_RGX",
".",
"match",
"(",
"hr_offset",
")",
"if",
"match",
"is",
"not",
"None",
":",
"groups",
"=",
"match",
".",
"groupdict",
"(",
")",
"unit",
"=",
"groups",
"[",
"'unit'",
"]",
".",
"lower",
"(",
")",
"[",
"0",
"]",
"num",
"=",
"sign_coeff",
"*",
"int",
"(",
"groups",
"[",
"'num'",
"]",
")",
"# is it a week ?",
"if",
"unit",
"==",
"'w'",
":",
"return",
"dateobj",
"+",
"num",
"*",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
"# or a month ?",
"if",
"unit",
"==",
"'m'",
":",
"return",
"add_months",
"(",
"dateobj",
",",
"num",
")",
"# or a year ?",
"if",
"unit",
"==",
"'y'",
":",
"return",
"add_years",
"(",
"dateobj",
",",
"num",
")",
"# we did what we could, just re-raise the original exception",
"raise"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
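A sketch of the three branches of `add_offset`. The fallback behaviour depends on `TIMEDELTA_RGX`, which is defined elsewhere in the module and not shown in this record, so the accepted spellings below are assumptions:
```python
from datetime import date
from toucan_data_sdk.utils.postprocess.filter_by_date import add_offset  # assumed path

print(add_offset(date(2018, 1, 1), '2 days', '+'))    # parsed directly by pandas.Timedelta
print(add_offset(date(2018, 1, 31), '1 month', '+'))  # Timedelta fails -> add_months -> 2018-02-28
print(add_offset(date(2018, 1, 1), '1 year', '-'))    # Timedelta fails -> add_years  -> 2017-01-01
```
`pandas.Timedelta` does not accept month or year units, which is exactly why the regex fallback exists.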
|
test
|
add_months
|
return `dateobj` + `nb_months`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_months(date(2018, 1, 1), 1)
datetime.date(2018, 2, 1)
>>> add_months(date(2018, 1, 1), -1)
datetime.date(2017, 12, 1)
>>> add_months(date(2018, 1, 1), 25)
datetime.date(2020, 2, 1)
>>> add_months(date(2018, 1, 1), -25)
datetime.date(2015, 12, 1)
>>> add_months(date(2018, 1, 31), 1)
datetime.date(2018, 2, 28)
|
toucan_data_sdk/utils/postprocess/filter_by_date.py
|
def add_months(dateobj, nb_months: int):
"""return `dateobj` + `nb_months`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_months(date(2018, 1, 1), 1)
datetime.date(2018, 2, 1)
>>> add_months(date(2018, 1, 1), -1)
datetime.date(2017, 12, 1)
>>> add_months(date(2018, 1, 1), 25)
datetime.date(2020, 2, 1)
>>> add_months(date(2018, 1, 1), -25)
datetime.date(2015, 12, 1)
>>> add_months(date(2018, 1, 31), 1)
datetime.date(2018, 2, 28)
"""
nb_years, nb_months = divmod(nb_months, 12)
month = dateobj.month + nb_months
if month > 12:
nb_years += 1
month -= 12
year = dateobj.year + nb_years
lastday = monthrange(year, month)[1]
return dateobj.replace(year=year, month=month, day=min(lastday, dateobj.day))
|
def add_months(dateobj, nb_months: int):
"""return `dateobj` + `nb_months`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_months(date(2018, 1, 1), 1)
datetime.date(2018, 2, 1)
>>> add_months(date(2018, 1, 1), -1)
datetime.date(2017, 12, 1)
>>> add_months(date(2018, 1, 1), 25)
datetime.date(2020, 2, 1)
>>> add_months(date(2018, 1, 1), -25)
datetime.date(2015, 12, 1)
>>> add_months(date(2018, 1, 31), 1)
datetime.date(2018, 2, 28)
"""
nb_years, nb_months = divmod(nb_months, 12)
month = dateobj.month + nb_months
if month > 12:
nb_years += 1
month -= 12
year = dateobj.year + nb_years
lastday = monthrange(year, month)[1]
return dateobj.replace(year=year, month=month, day=min(lastday, dateobj.day))
|
[
"return",
"dateobj",
"+",
"nb_months"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L68-L92
|
[
"def",
"add_months",
"(",
"dateobj",
",",
"nb_months",
":",
"int",
")",
":",
"nb_years",
",",
"nb_months",
"=",
"divmod",
"(",
"nb_months",
",",
"12",
")",
"month",
"=",
"dateobj",
".",
"month",
"+",
"nb_months",
"if",
"month",
">",
"12",
":",
"nb_years",
"+=",
"1",
"month",
"-=",
"12",
"year",
"=",
"dateobj",
".",
"year",
"+",
"nb_years",
"lastday",
"=",
"monthrange",
"(",
"year",
",",
"month",
")",
"[",
"1",
"]",
"return",
"dateobj",
".",
"replace",
"(",
"year",
"=",
"year",
",",
"month",
"=",
"month",
",",
"day",
"=",
"min",
"(",
"lastday",
",",
"dateobj",
".",
"day",
")",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
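A sketch showing the month-end clamping that the doctests above describe (import path assumed from the record's `path` field):
```python
from datetime import date
from toucan_data_sdk.utils.postprocess.filter_by_date import add_months  # assumed path

print(add_months(date(2018, 1, 31), 1))   # 2018-02-28: day clamped to month end
print(add_months(date(2018, 11, 30), 3))  # 2019-02-28: month overflow rolls the year
```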
|
test
|
add_years
|
return `dateobj` + `nb_years`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_years(date(2018, 1, 1), 1)
datetime.date(2019, 1, 1)
>>> add_years(date(2018, 1, 1), -1)
datetime.date(2017, 1, 1)
>>> add_years(date(2020, 2, 29), 1)
datetime.date(2021, 2, 28)
>>> add_years(date(2020, 2, 29), -1)
datetime.date(2019, 2, 28)
|
toucan_data_sdk/utils/postprocess/filter_by_date.py
|
def add_years(dateobj, nb_years):
"""return `dateobj` + `nb_years`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_years(date(2018, 1, 1), 1)
datetime.date(2019, 1, 1)
>>> add_years(date(2018, 1, 1), -1)
datetime.date(2017, 1, 1)
>>> add_years(date(2020, 2, 29), 1)
datetime.date(2021, 2, 28)
>>> add_years(date(2020, 2, 29), -1)
datetime.date(2019, 2, 28)
"""
year = dateobj.year + nb_years
lastday = monthrange(year, dateobj.month)[1]
return dateobj.replace(year=year, day=min(lastday, dateobj.day))
|
def add_years(dateobj, nb_years):
"""return `dateobj` + `nb_years`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_years(date(2018, 1, 1), 1)
datetime.date(2019, 1, 1)
>>> add_years(date(2018, 1, 1), -1)
datetime.date(2017, 1, 1)
>>> add_years(date(2020, 2, 29), 1)
datetime.date(2021, 2, 28)
>>> add_years(date(2020, 2, 29), -1)
datetime.date(2019, 2, 28)
"""
year = dateobj.year + nb_years
lastday = monthrange(year, dateobj.month)[1]
return dateobj.replace(year=year, day=min(lastday, dateobj.day))
|
[
"return",
"dateobj",
"+",
"nb_years"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L95-L112
|
[
"def",
"add_years",
"(",
"dateobj",
",",
"nb_years",
")",
":",
"year",
"=",
"dateobj",
".",
"year",
"+",
"nb_years",
"lastday",
"=",
"monthrange",
"(",
"year",
",",
"dateobj",
".",
"month",
")",
"[",
"1",
"]",
"return",
"dateobj",
".",
"replace",
"(",
"year",
"=",
"year",
",",
"day",
"=",
"min",
"(",
"lastday",
",",
"dateobj",
".",
"day",
")",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
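Same idea for `add_years`, which only has to clamp February 29th (import path assumed):
```python
from datetime import date
from toucan_data_sdk.utils.postprocess.filter_by_date import add_years  # assumed path

print(add_years(date(2020, 2, 29), 2))   # 2022-02-28: Feb 29 clamped in a non-leap year
```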
|
test
|
parse_date
|
parse `datestr` and return corresponding date object.
`datestr` should be a string matching `date_fmt` and parseable by `strptime`
but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -
OFFSET` syntax. When using this syntax, `OFFSET` should be understandable by
`pandas.Timedelta` (cf.
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html); the `w`, `week`,
`month` and `year` offset keywords are also accepted. `datestr` MUST be wrapped
in parentheses.
Additionally, the following symbolic names are supported: `TODAY`,
`YESTERDAY`, `TOMORROW`.
Example usage:
>>> parse_date('2018-01-01', '%Y-%m-%d')
datetime.date(2018, 1, 1)
>>> parse_date('(2018-01-01) + 1day', '%Y-%m-%d')
datetime.date(2018, 1, 2)
>>> parse_date('(2018-01-01) + 2weeks', '%Y-%m-%d')
datetime.date(2018, 1, 15)
Parameters:
`datestr`: the date to parse, formatted as `date_fmt`
`date_fmt`: expected date format
Returns:
The `date` object. If the date could not be parsed, a ValueError is raised.
|
toucan_data_sdk/utils/postprocess/filter_by_date.py
|
def parse_date(datestr: str, date_fmt: str) -> date:
"""parse `datestr` and return corresponding date object.
`datestr` should be a string matching `date_fmt` and parseable by `strptime`
but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -
OFFSET` syntax. When using this syntax, `OFFSET` should be understandable by
`pandas.Timedelta` (cf.
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html); the `w`, `week`,
`month` and `year` offset keywords are also accepted. `datestr` MUST be wrapped
in parentheses.
Additionally, the following symbolic names are supported: `TODAY`,
`YESTERDAY`, `TOMORROW`.
Example usage:
>>> parse_date('2018-01-01', '%Y-%m-%d')
datetime.date(2018, 1, 1)
>>> parse_date('(2018-01-01) + 1day', '%Y-%m-%d')
datetime.date(2018, 1, 2)
>>> parse_date('(2018-01-01) + 2weeks', '%Y-%m-%d')
datetime.date(2018, 1, 15)
Parameters:
`datestr`: the date to parse, formatted as `date_fmt`
`date_fmt`: expected date format
Returns:
The `date` object. If the date could not be parsed, a ValueError is raised.
"""
rgx = re.compile(r'\((?P<date>.*)\)(\s*(?P<sign>[+-])(?P<offset>.*))?$')
datestr = datestr.strip()
match = rgx.match(datestr)
# if regexp doesn't match, date must match the expected format
if match is None:
return _norm_date(datestr, date_fmt)
datestr = match.group('date').strip()
dateobj = _norm_date(datestr, date_fmt)
offset = match.group('offset')
if offset:
return add_offset(dateobj, offset, match.group('sign'))
return dateobj
|
def parse_date(datestr: str, date_fmt: str) -> date:
"""parse `datestr` and return corresponding date object.
`datestr` should be a string matching `date_fmt` and parseable by `strptime`
but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -
OFFSET` syntax. When using this syntax, `OFFSET` should be understandable by
`pandas.Timedelta` (cf.
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html); the `w`, `week`,
`month` and `year` offset keywords are also accepted. `datestr` MUST be wrapped
in parentheses.
Additionally, the following symbolic names are supported: `TODAY`,
`YESTERDAY`, `TOMORROW`.
Example usage:
>>> parse_date('2018-01-01', '%Y-%m-%d')
datetime.date(2018, 1, 1)
>>> parse_date('(2018-01-01) + 1day', '%Y-%m-%d')
datetime.date(2018, 1, 2)
>>> parse_date('(2018-01-01) + 2weeks', '%Y-%m-%d')
datetime.date(2018, 1, 15)
Parameters:
`datestr`: the date to parse, formatted as `date_fmt`
`date_fmt`: expected date format
Returns:
The `date` object. If the date could not be parsed, a ValueError is raised.
"""
rgx = re.compile(r'\((?P<date>.*)\)(\s*(?P<sign>[+-])(?P<offset>.*))?$')
datestr = datestr.strip()
match = rgx.match(datestr)
# if regexp doesn't match, date must match the expected format
if match is None:
return _norm_date(datestr, date_fmt)
datestr = match.group('date').strip()
dateobj = _norm_date(datestr, date_fmt)
offset = match.group('offset')
if offset:
return add_offset(dateobj, offset, match.group('sign'))
return dateobj
|
[
"parse",
"datestr",
"and",
"return",
"corresponding",
"date",
"object",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L115-L152
|
[
"def",
"parse_date",
"(",
"datestr",
":",
"str",
",",
"date_fmt",
":",
"str",
")",
"->",
"date",
":",
"rgx",
"=",
"re",
".",
"compile",
"(",
"r'\\((?P<date>.*)\\)(\\s*(?P<sign>[+-])(?P<offset>.*))?$'",
")",
"datestr",
"=",
"datestr",
".",
"strip",
"(",
")",
"match",
"=",
"rgx",
".",
"match",
"(",
"datestr",
")",
"# if regexp doesn't match, date must match the expected format",
"if",
"match",
"is",
"None",
":",
"return",
"_norm_date",
"(",
"datestr",
",",
"date_fmt",
")",
"datestr",
"=",
"match",
".",
"group",
"(",
"'date'",
")",
".",
"strip",
"(",
")",
"dateobj",
"=",
"_norm_date",
"(",
"datestr",
",",
"date_fmt",
")",
"offset",
"=",
"match",
".",
"group",
"(",
"'offset'",
")",
"if",
"offset",
":",
"return",
"add_offset",
"(",
"dateobj",
",",
"offset",
",",
"match",
".",
"group",
"(",
"'sign'",
")",
")",
"return",
"dateobj"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
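A sketch combining the plain, offset and symbolic forms accepted by `parse_date` (import path assumed from the record's `path` field):
```python
from toucan_data_sdk.utils.postprocess.filter_by_date import parse_date  # assumed path

print(parse_date('2018-01-01', '%Y-%m-%d'))            # plain strptime parse
print(parse_date('(2018-01-01) + 2days', '%Y-%m-%d'))  # offset form -> 2018-01-03
print(parse_date('(TODAY) - 1W', '%Y-%m-%d'))          # symbolic date with an offset
```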
|
test
|
filter_by_date
|
Filter your dataframe's data by date.
This function will interpret `start`, `stop` and `atdate` and build
the corresponding date range. The caller must specify one of:
- `atdate`: keep all rows matching this date exactly.
- `start`: keep all rows matching this date onwards.
- `stop`: keep all rows matching dates before this one.
- `start` and `stop`: keep all rows between `start` and `stop`.
Any other combination will raise an error. The lower bound of the date range
will be included, the upper bound will be excluded.
When specified, `start`, `stop` and `atdate` values are expected to match the
`date_format` format or a known symbolic value (i.e. 'TODAY', 'YESTERDAY' or 'TOMORROW').
Additionally, the offset syntax "(date) + offset" is also supported (mind
the parentheses around the date string). In that case, the offset must use
one of the syntaxes supported by `pandas.Timedelta` (see [pandas doc](
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html))
---
### Parameters
*mandatory :*
- `date_col` (*str*): the name of the dataframe's column to filter on
*optional :*
- `date_format` (*str*): expected date format in column `date_col` (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `start` (*str*): if specified, lower bound (included) of the date range
- `stop` (*str*): if specified, upper bound (excluded) of the date range
- `atdate` (*str*): if specified, the exact date we're filtering on
|
toucan_data_sdk/utils/postprocess/filter_by_date.py
|
def filter_by_date(
df,
date_col: str,
date_format: str = '%Y-%m-%d',
start: str = None,
stop: str = None,
atdate: str = None
):
"""
Filter your dataframe's data by date.
This function will interpret `start`, `stop` and `atdate` and build
the corresponding date range. The caller must specify one of:
- `atdate`: keep all rows matching this date exactly.
- `start`: keep all rows matching this date onwards.
- `stop`: keep all rows matching dates before this one.
- `start` and `stop`: keep all rows between `start` and `stop`.
Any other combination will raise an error. The lower bound of the date range
will be included, the upper bound will be excluded.
When specified, `start`, `stop` and `atdate` values are expected to match the
`date_format` format or a known symbolic value (i.e. 'TODAY', 'YESTERDAY' or 'TOMORROW').
Additionally, the offset syntax "(date) + offset" is also supported (mind
the parentheses around the date string). In that case, the offset must use
one of the syntaxes supported by `pandas.Timedelta` (see [pandas doc](
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html))
---
### Parameters
*mandatory :*
- `date_col` (*str*): the name of the dataframe's column to filter on
*optional :*
- `date_format` (*str*): expected date format in column `date_col` (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `start` (*str*): if specified, lower bound (included) of the date range
- `stop` (*str*): if specified, upper bound (excluded) of the date range
- `atdate` (*str*): if specified, the exact date we're filtering on
"""
mask = None
if start is None and stop is None and atdate is None:
raise TypeError('either "start", "stop" or "atdate" must be specified')
if start is not None and atdate is not None:
raise TypeError('"start" and "atdate" are mutually exclusive')
if stop is not None and atdate is not None:
raise TypeError('"stop" and "atdate" are mutually exclusive')
# add a new column that will hold actual date objects instead of strings.
# This column is just temporary and will be removed before returning the
# filtered dataframe.
filtercol = str(uuid4())
df[filtercol] = pd.to_datetime(df[date_col], format=date_format)
if atdate is not None:
mask = df[filtercol] == parse_date(atdate, date_format)
elif start is not None and stop is not None:
mask = ((df[filtercol] >= parse_date(start, date_format)) &
(df[filtercol] < parse_date(stop, date_format)))
elif stop is None:
mask = df[filtercol] >= parse_date(start, date_format)
elif start is None:
mask = df[filtercol] < parse_date(stop, date_format)
return df[mask].drop(filtercol, axis=1)
|
def filter_by_date(
df,
date_col: str,
date_format: str = '%Y-%m-%d',
start: str = None,
stop: str = None,
atdate: str = None
):
"""
Filter your dataframe's data by date.
This function will interpret `start`, `stop` and `atdate` and build
the corresponding date range. The caller must specify one of:
- `atdate`: keep all rows matching this date exactly.
- `start`: keep all rows matching this date onwards.
- `stop`: keep all rows matching dates before this one.
- `start` and `stop`: keep all rows between `start` and `stop`.
Any other combination will raise an error. The lower bound of the date range
will be included, the upper bound will be excluded.
When specified, `start`, `stop` and `atdate` values are expected to match the
`date_format` format or a known symbolic value (i.e. 'TODAY', 'YESTERDAY' or 'TOMORROW').
Additionally, the offset syntax "(date) + offset" is also supported (mind
the parentheses around the date string). In that case, the offset must use
one of the syntaxes supported by `pandas.Timedelta` (see [pandas doc](
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html))
---
### Parameters
*mandatory :*
- `date_col` (*str*): the name of the dataframe's column to filter on
*optional :*
- `date_format` (*str*): expected date format in column `date_col` (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
- `start` (*str*): if specified, lower bound (included) of the date range
- `stop` (*str*): if specified, upper bound (excluded) of the date range
- `atdate` (*str*): if specified, the exact date we're filtering on
"""
mask = None
if start is None and stop is None and atdate is None:
raise TypeError('either "start", "stop" or "atdate" must be specified')
if start is not None and atdate is not None:
raise TypeError('"start" and "atdate" are mutually exclusive')
if stop is not None and atdate is not None:
raise TypeError('"stop" and "atdate" are mutually exclusive')
# add a new column that will hold actual date objects instead of strings.
# This column is just temporary and will be removed before returning the
# filtered dataframe.
filtercol = str(uuid4())
df[filtercol] = pd.to_datetime(df[date_col], format=date_format)
if atdate is not None:
mask = df[filtercol] == parse_date(atdate, date_format)
elif start is not None and stop is not None:
mask = ((df[filtercol] >= parse_date(start, date_format)) &
(df[filtercol] < parse_date(stop, date_format)))
elif stop is None:
mask = df[filtercol] >= parse_date(start, date_format)
elif start is None:
mask = df[filtercol] < parse_date(stop, date_format)
return df[mask].drop(filtercol, axis=1)
|
[
"Filter",
"dataframe",
"your",
"data",
"by",
"date",
"."
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L155-L220
|
[
"def",
"filter_by_date",
"(",
"df",
",",
"date_col",
":",
"str",
",",
"date_format",
":",
"str",
"=",
"'%Y-%m-%d'",
",",
"start",
":",
"str",
"=",
"None",
",",
"stop",
":",
"str",
"=",
"None",
",",
"atdate",
":",
"str",
"=",
"None",
")",
":",
"mask",
"=",
"None",
"if",
"start",
"is",
"None",
"and",
"stop",
"is",
"None",
"and",
"atdate",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'either \"start\", \"stop\" or \"atdate\" must be specified'",
")",
"if",
"start",
"is",
"not",
"None",
"and",
"atdate",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'\"start\" and \"atdate\" are mutually exclusive'",
")",
"if",
"stop",
"is",
"not",
"None",
"and",
"atdate",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'\"stop\" and \"atdate\" are mutually exclusive'",
")",
"# add a new column that will hold actual date objects instead of strings.",
"# This column is just temporary and will be removed before returning the",
"# filtered dataframe.",
"filtercol",
"=",
"str",
"(",
"uuid4",
"(",
")",
")",
"df",
"[",
"filtercol",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"date_col",
"]",
",",
"format",
"=",
"date_format",
")",
"if",
"atdate",
"is",
"not",
"None",
":",
"mask",
"=",
"df",
"[",
"filtercol",
"]",
"==",
"parse_date",
"(",
"atdate",
",",
"date_format",
")",
"elif",
"start",
"is",
"not",
"None",
"and",
"stop",
"is",
"not",
"None",
":",
"mask",
"=",
"(",
"(",
"df",
"[",
"filtercol",
"]",
">=",
"parse_date",
"(",
"start",
",",
"date_format",
")",
")",
"&",
"(",
"df",
"[",
"filtercol",
"]",
"<",
"parse_date",
"(",
"stop",
",",
"date_format",
")",
")",
")",
"elif",
"stop",
"is",
"None",
":",
"mask",
"=",
"df",
"[",
"filtercol",
"]",
">=",
"parse_date",
"(",
"start",
",",
"date_format",
")",
"elif",
"start",
"is",
"None",
":",
"mask",
"=",
"df",
"[",
"filtercol",
"]",
"<",
"parse_date",
"(",
"stop",
",",
"date_format",
")",
"return",
"df",
"[",
"mask",
"]",
".",
"drop",
"(",
"filtercol",
",",
"axis",
"=",
"1",
")"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
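A minimal usage sketch for `filter_by_date` above, assuming pandas is installed and the function is importable from the path recorded for it; the dataframe and values are hypothetical:

```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.filter_by_date import filter_by_date

df = pd.DataFrame({
    'date': ['2018-01-01', '2018-01-15', '2018-02-01'],
    'value': [1, 2, 3],
})

# Rows with 2018-01-01 <= date < 2018-02-01 (the stop bound is excluded)
january = filter_by_date(df, date_col='date', start='2018-01-01', stop='2018-02-01')

# Rows matching exactly one date
first_day = filter_by_date(df, date_col='date', atdate='2018-01-01')

# Per the docstring, bounds may also use the "(date) + offset" syntax,
# e.g. start='(2018-01-01) + 2days'; offset parsing is handled by the
# parse_date helper, which is not shown in this record.
```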
test
|
replace
|
Change the label of a value or a column within your data source.
(Similar to `rename` but does not have the notion of locale)
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to modify.
- `to_replace` (*dict*): keys of this dict are the old values, each mapping to its substitute.
*optional :*
- `new_column` (*str*): name of the output column. By default, the `column` argument is modified.
**Note**: extra parameters can be used (see [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html))
---
### Example
**Input**
article | rating
:------:|:------:
book | 1
puzzle | 3
food | 5
We want to split the ratings in three categories: "good", "average" and "poor".
```cson
replace:
column: "rating"
new_column: "rating_category" # create a new column with replaced data
to_replace:
1: "poor"
2: "poor"
3: "average"
4: "good"
5: "good"
```
**Output**
article | rating | rating_category
:------:|:------:|:--------------:
book | 1 | poor
puzzle | 3 | average
food | 5 | good
|
toucan_data_sdk/utils/postprocess/replace.py
|
def replace(df, column: str, new_column: str = None, **kwargs):
"""
Change the label of a value or a column within your data source.
(Similar to `rename` but does not have the notion of locale)
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to modify.
- `to_replace` (*dict*): keys of this dict are the old values, each mapping to its substitute.
*optional :*
- `new_column` (*str*): name of the output column. By default, the `column` argument is modified.
**Note**: extra parameters can be used (see [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html))
---
### Example
**Input**
article | rating
:------:|:------:
book | 1
puzzle | 3
food | 5
We want to split the ratings in three categories: "good", "average" and "poor".
```cson
replace:
column: "rating"
new_column: "rating_category" # create a new column with replaced data
to_replace:
1: "poor"
2: "poor"
3: "average"
4: "good"
5: "good"
```
**Output**
article | rating | rating_category
:------:|:------:|:--------------:
book | 1 | poor
puzzle | 3 | average
food | 5 | good
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].replace(**kwargs)
return df
|
def replace(df, column: str, new_column: str = None, **kwargs):
"""
Change the label of a value or a column within your data source.
(Similar to `rename` but does not have the notion of locale)
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to modify.
- `to_replace` (*dict*): keys of this dict are the old values, each mapping to its substitute.
*optional :*
- `new_column` (*str*): name of the output column. By default, the `column` argument is modified.
**Note**: extra parameters can be used (see [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html))
---
### Example
**Input**
article | rating
:------:|:------:
book | 1
puzzle | 3
food | 5
We want to split the ratings in three categories: "good", "average" and "poor".
```cson
replace:
column: "rating"
new_column: "rating_category" # create a new column with replaced data
to_replace:
1: "poor"
2: "poor"
3: "average"
4: "good"
5: "good"
```
**Output**
article | rating | rating_category
:------:|:------:|:--------------:
book | 1 | poor
puzzle | 3 | average
food | 5 | good
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].replace(**kwargs)
return df
|
[
"Change",
"the",
"label",
"of",
"a",
"value",
"or",
"a",
"columns",
"within",
"your",
"data",
"source",
".",
"(",
"Similar",
"to",
"rename",
"but",
"does",
"not",
"have",
"the",
"notion",
"of",
"locale",
")"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/replace.py#L1-L56
|
[
"def",
"replace",
"(",
"df",
",",
"column",
":",
"str",
",",
"new_column",
":",
"str",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"df",
".",
"loc",
"[",
":",
",",
"new_column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"replace",
"(",
"*",
"*",
"kwargs",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
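A usage sketch reproducing the docstring example for `replace`, assuming the module is importable from the recorded path; extra keyword arguments such as `to_replace` are forwarded to pandas' `Series.replace`:

```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.replace import replace

df = pd.DataFrame({'article': ['book', 'puzzle', 'food'], 'rating': [1, 3, 5]})

# Bucket ratings into categories, keeping the original 'rating' column
out = replace(
    df,
    column='rating',
    new_column='rating_category',
    to_replace={1: 'poor', 2: 'poor', 3: 'average', 4: 'good', 5: 'good'},
)
# out['rating_category'].tolist() -> ['poor', 'average', 'good']
```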
test
|
percentage
|
Add a column to the dataframe according to the groupby logic on group_cols
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to compute the percentage on
*optional :*
- `group_cols` (*list*): names of columns for the groupby logic
- `new_column` (*str*): name of the output column. By default `column` will be overwritten.
---
**Input**
| gender | sport | number |
|:------:|:----------:|:------:|
| male | bicycle | 17 |
| female | basketball | 17 |
| male | basketball | 3 |
| female | football | 7 |
| female | running | 30 |
| male | running | 20 |
| male | football | 21 |
| female | bicycle | 17 |
```cson
percentage:
new_column: 'number_percentage'
column: 'number'
group_cols: ['sport']
```
**Output**
| gender | sport | number | number_percentage |
|:------:|:----------:|:------:|:-----------------:|
| male | bicycle | 17 | 50.0 |
| female | basketball | 17 | 85.0 |
| male | basketball | 3 | 15.0 |
| female | football | 7 | 25.0 |
| female | running | 30 | 60.0 |
| male | running | 20 | 40.0 |
| male | football | 21 | 75.0 |
| female | bicycle | 17 | 50.0 |
|
toucan_data_sdk/utils/postprocess/percentage.py
|
def percentage(
df,
column: str,
group_cols: Union[str, List[str]] = None,
new_column: str = None
):
"""
Add a column to the dataframe according to the groupby logic on group_cols
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to compute the percentage on
*optional :*
- `group_cols` (*list*): names of columns for the groupby logic
- `new_column` (*str*): name of the output column. By default `column` will be overwritten.
---
**Input**
| gender | sport | number |
|:------:|:----------:|:------:|
| male | bicycle | 17 |
| female | basketball | 17 |
| male | basketball | 3 |
| female | football | 7 |
| female | running | 30 |
| male | running | 20 |
| male | football | 21 |
| female | bicycle | 17 |
```cson
percentage:
new_column: 'number_percentage'
column: 'number'
group_cols: ['sport']
```
**Output**
| gender | sport | number | number_percentage |
|:------:|:----------:|:------:|:-----------------:|
| male | bicycle | 17 | 50.0 |
| female | basketball | 17 | 85.0 |
| male | basketball | 3 | 15.0 |
| female | football | 7 | 25.0 |
| female | running | 30 | 60.0 |
| male | running | 20 | 40.0 |
| male | football | 21 | 75.0 |
| female | bicycle | 17 | 50.0 |
"""
new_column = new_column or column
if group_cols is None:
df[new_column] = 100. * df[column] / sum(df[column])
else:
df[new_column] = 100. * df[column] / df.groupby(group_cols)[column].transform(sum)
return df
|
def percentage(
df,
column: str,
group_cols: Union[str, List[str]] = None,
new_column: str = None
):
"""
Add a column to the dataframe according to the groupby logic on group_cols
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to compute the percentage on
*optional :*
- `group_cols` (*list*): names of columns for the groupby logic
- `new_column` (*str*): name of the output column. By default `column` will be overwritten.
---
**Input**
| gender | sport | number |
|:------:|:----------:|:------:|
| male | bicycle | 17 |
| female | basketball | 17 |
| male | basketball | 3 |
| female | football | 7 |
| female | running | 30 |
| male | running | 20 |
| male | football | 21 |
| female | bicycle | 17 |
```cson
percentage:
new_column: 'number_percentage'
column: 'number'
group_cols: ['sport']
```
**Output**
| gender | sport | number | number_percentage |
|:------:|:----------:|:------:|:-----------------:|
| male | bicycle | 17 | 50.0 |
| female | basketball | 17 | 85.0 |
| male | basketball | 3 | 15.0 |
| female | football | 7 | 25.0 |
| female | running | 30 | 60.0 |
| male | running | 20 | 40.0 |
| male | football | 21 | 75.0 |
| female | bicycle | 17 | 50.0 |
"""
new_column = new_column or column
if group_cols is None:
df[new_column] = 100. * df[column] / sum(df[column])
else:
df[new_column] = 100. * df[column] / df.groupby(group_cols)[column].transform(sum)
return df
|
[
"Add",
"a",
"column",
"to",
"the",
"dataframe",
"according",
"to",
"the",
"groupby",
"logic",
"on",
"group_cols"
] |
ToucanToco/toucan-data-sdk
|
python
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/percentage.py#L4-L64
|
[
"def",
"percentage",
"(",
"df",
",",
"column",
":",
"str",
",",
"group_cols",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"new_column",
":",
"str",
"=",
"None",
")",
":",
"new_column",
"=",
"new_column",
"or",
"column",
"if",
"group_cols",
"is",
"None",
":",
"df",
"[",
"new_column",
"]",
"=",
"100.",
"*",
"df",
"[",
"column",
"]",
"/",
"sum",
"(",
"df",
"[",
"column",
"]",
")",
"else",
":",
"df",
"[",
"new_column",
"]",
"=",
"100.",
"*",
"df",
"[",
"column",
"]",
"/",
"df",
".",
"groupby",
"(",
"group_cols",
")",
"[",
"column",
"]",
".",
"transform",
"(",
"sum",
")",
"return",
"df"
] |
c3ca874e1b64f4bdcc2edda750a72d45d1561d8a
|
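A usage sketch for `percentage`, matching the 'bicycle' rows of the docstring example (assuming the module is importable from the recorded path):

```python
import pandas as pd
from toucan_data_sdk.utils.postprocess.percentage import percentage

df = pd.DataFrame({
    'gender': ['male', 'female'],
    'sport': ['bicycle', 'bicycle'],
    'number': [17, 17],
})

# Each row's share of 'number' within its 'sport' group
out = percentage(df, column='number', group_cols=['sport'],
                 new_column='number_percentage')
# both rows get number_percentage == 50.0
```

Without `group_cols`, the percentage is computed over the whole column instead.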
test
|
ada_family_core
|
Optimize by SGD, AdaGrad, or AdaDelta.
|
deepy/trainers/cores/ada_family.py
|
def ada_family_core(params, gparams, learning_rate=0.01, eps=1e-6, rho=0.95, method="ADADELTA",
beta=0.0, gsum_regularization=0.0001):
"""
Optimize by SGD, AdaGrad, or AdaDelta.
"""
_, _, _, args = inspect.getargvalues(inspect.currentframe())
logging.info("ada_family_core: %s" % str(args.items()))
free_parameters = []
if method == "FINETUNING_ADAGRAD":
method = "ADAGRAD"
gsum_regularization = 0
oneMinusBeta = 1 - beta
gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True), dtype=FLOATX), name="gsum_%s" % param.name) if (method == 'ADADELTA' or method == 'ADAGRAD') else None for param in params]
xsums = [theano.shared(np.zeros_like(param.get_value(borrow=True), dtype=FLOATX), name="xsum_%s" % param.name) if method == 'ADADELTA' else None for param in params]
# Fix for AdaGrad, init gsum to 1
if method == 'ADAGRAD':
for gsum in gsums:
gsum.set_value(gsum.get_value() ** 0)
updates = OrderedDict()
# Updates
for gparam, param, gsum, xsum in zip(gparams, params, gsums, xsums):
if method == 'ADADELTA':
updates[gsum] = rho * gsum + (1. - rho) * (gparam **2)
dparam = -T.sqrt((xsum + eps) / (updates[gsum] + eps)) * gparam
updates[xsum] = rho * xsum + (1. - rho) * (dparam **2)
updates[param] = param * oneMinusBeta + dparam
elif method == 'ADAGRAD':
updates[gsum] = gsum + (gparam **2) - gsum_regularization * gsum
updates[param] = param * oneMinusBeta - learning_rate * (gparam / (T.sqrt(updates[gsum] + eps)))
else:
updates[param] = param * oneMinusBeta - gparam * learning_rate
# Add free parameters
if method == 'ADADELTA':
free_parameters.extend(gsums + xsums)
elif method == 'ADAGRAD':
free_parameters.extend(gsums)
# Check dtype
for k in updates:
if updates[k].dtype != FLOATX:
updates[k] = updates[k].astype(FLOATX)
return updates.items(), free_parameters
|
def ada_family_core(params, gparams, learning_rate=0.01, eps=1e-6, rho=0.95, method="ADADELTA",
beta=0.0, gsum_regularization=0.0001):
"""
Optimize by SGD, AdaGrad, or AdaDelta.
"""
_, _, _, args = inspect.getargvalues(inspect.currentframe())
logging.info("ada_family_core: %s" % str(args.items()))
free_parameters = []
if method == "FINETUNING_ADAGRAD":
method = "ADAGRAD"
gsum_regularization = 0
oneMinusBeta = 1 - beta
gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True), dtype=FLOATX), name="gsum_%s" % param.name) if (method == 'ADADELTA' or method == 'ADAGRAD') else None for param in params]
xsums = [theano.shared(np.zeros_like(param.get_value(borrow=True), dtype=FLOATX), name="xsum_%s" % param.name) if method == 'ADADELTA' else None for param in params]
# Fix for AdaGrad, init gsum to 1
if method == 'ADAGRAD':
for gsum in gsums:
gsum.set_value(gsum.get_value() ** 0)
updates = OrderedDict()
# Updates
for gparam, param, gsum, xsum in zip(gparams, params, gsums, xsums):
if method == 'ADADELTA':
updates[gsum] = rho * gsum + (1. - rho) * (gparam **2)
dparam = -T.sqrt((xsum + eps) / (updates[gsum] + eps)) * gparam
updates[xsum] = rho * xsum + (1. - rho) * (dparam **2)
updates[param] = param * oneMinusBeta + dparam
elif method == 'ADAGRAD':
updates[gsum] = gsum + (gparam **2) - gsum_regularization * gsum
updates[param] = param * oneMinusBeta - learning_rate * (gparam / (T.sqrt(updates[gsum] + eps)))
else:
updates[param] = param * oneMinusBeta - gparam * learning_rate
# Add free parameters
if method == 'ADADELTA':
free_parameters.extend(gsums + xsums)
elif method == 'ADAGRAD':
free_parameters.extend(gsums)
# Check dtype
for k in updates:
if updates[k].dtype != FLOATX:
updates[k] = updates[k].astype(FLOATX)
return updates.items(), free_parameters
|
[
"Optimize",
"by",
"SGD",
"AdaGrad",
"or",
"AdaDelta",
"."
] |
zomux/deepy
|
python
|
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/trainers/cores/ada_family.py#L13-L61
|
[
"def",
"ada_family_core",
"(",
"params",
",",
"gparams",
",",
"learning_rate",
"=",
"0.01",
",",
"eps",
"=",
"1e-6",
",",
"rho",
"=",
"0.95",
",",
"method",
"=",
"\"ADADELTA\"",
",",
"beta",
"=",
"0.0",
",",
"gsum_regularization",
"=",
"0.0001",
")",
":",
"_",
",",
"_",
",",
"_",
",",
"args",
"=",
"inspect",
".",
"getargvalues",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
"logging",
".",
"info",
"(",
"\"ada_family_core: %s\"",
"%",
"str",
"(",
"args",
".",
"items",
"(",
")",
")",
")",
"free_parameters",
"=",
"[",
"]",
"if",
"method",
"==",
"\"FINETUNING_ADAGRAD\"",
":",
"method",
"=",
"\"ADAGRAD\"",
"gsum_regularization",
"=",
"0",
"oneMinusBeta",
"=",
"1",
"-",
"beta",
"gsums",
"=",
"[",
"theano",
".",
"shared",
"(",
"np",
".",
"zeros_like",
"(",
"param",
".",
"get_value",
"(",
"borrow",
"=",
"True",
")",
",",
"dtype",
"=",
"FLOATX",
")",
",",
"name",
"=",
"\"gsum_%s\"",
"%",
"param",
".",
"name",
")",
"if",
"(",
"method",
"==",
"'ADADELTA'",
"or",
"method",
"==",
"'ADAGRAD'",
")",
"else",
"None",
"for",
"param",
"in",
"params",
"]",
"xsums",
"=",
"[",
"theano",
".",
"shared",
"(",
"np",
".",
"zeros_like",
"(",
"param",
".",
"get_value",
"(",
"borrow",
"=",
"True",
")",
",",
"dtype",
"=",
"FLOATX",
")",
",",
"name",
"=",
"\"xsum_%s\"",
"%",
"param",
".",
"name",
")",
"if",
"method",
"==",
"'ADADELTA'",
"else",
"None",
"for",
"param",
"in",
"params",
"]",
"# Fix for AdaGrad, init gsum to 1",
"if",
"method",
"==",
"'ADAGRAD'",
":",
"for",
"gsum",
"in",
"gsums",
":",
"gsum",
".",
"set_value",
"(",
"gsum",
".",
"get_value",
"(",
")",
"**",
"0",
")",
"updates",
"=",
"OrderedDict",
"(",
")",
"# Updates",
"for",
"gparam",
",",
"param",
",",
"gsum",
",",
"xsum",
"in",
"zip",
"(",
"gparams",
",",
"params",
",",
"gsums",
",",
"xsums",
")",
":",
"if",
"method",
"==",
"'ADADELTA'",
":",
"updates",
"[",
"gsum",
"]",
"=",
"rho",
"*",
"gsum",
"+",
"(",
"1.",
"-",
"rho",
")",
"*",
"(",
"gparam",
"**",
"2",
")",
"dparam",
"=",
"-",
"T",
".",
"sqrt",
"(",
"(",
"xsum",
"+",
"eps",
")",
"/",
"(",
"updates",
"[",
"gsum",
"]",
"+",
"eps",
")",
")",
"*",
"gparam",
"updates",
"[",
"xsum",
"]",
"=",
"rho",
"*",
"xsum",
"+",
"(",
"1.",
"-",
"rho",
")",
"*",
"(",
"dparam",
"**",
"2",
")",
"updates",
"[",
"param",
"]",
"=",
"param",
"*",
"oneMinusBeta",
"+",
"dparam",
"elif",
"method",
"==",
"'ADAGRAD'",
":",
"updates",
"[",
"gsum",
"]",
"=",
"gsum",
"+",
"(",
"gparam",
"**",
"2",
")",
"-",
"gsum_regularization",
"*",
"gsum",
"updates",
"[",
"param",
"]",
"=",
"param",
"*",
"oneMinusBeta",
"-",
"learning_rate",
"*",
"(",
"gparam",
"/",
"(",
"T",
".",
"sqrt",
"(",
"updates",
"[",
"gsum",
"]",
"+",
"eps",
")",
")",
")",
"else",
":",
"updates",
"[",
"param",
"]",
"=",
"param",
"*",
"oneMinusBeta",
"-",
"gparam",
"*",
"learning_rate",
"# Add free parameters",
"if",
"method",
"==",
"'ADADELTA'",
":",
"free_parameters",
".",
"extend",
"(",
"gsums",
"+",
"xsums",
")",
"elif",
"method",
"==",
"'ADAGRAD'",
":",
"free_parameters",
".",
"extend",
"(",
"gsums",
")",
"# Check dtype",
"for",
"k",
"in",
"updates",
":",
"if",
"updates",
"[",
"k",
"]",
".",
"dtype",
"!=",
"FLOATX",
":",
"updates",
"[",
"k",
"]",
"=",
"updates",
"[",
"k",
"]",
".",
"astype",
"(",
"FLOATX",
")",
"return",
"updates",
".",
"items",
"(",
")",
",",
"free_parameters"
] |
090fbad22a08a809b12951cd0d4984f5bd432698
|
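To show how an update-rule factory like `ada_family_core` is typically wired into a Theano training function, here is a minimal sketch with a hypothetical toy objective; it assumes Theano and deepy are installed and that each shared parameter carries a `name`, which the function uses to label its accumulators. For ADADELTA the code keeps decayed running averages of the squared gradient and squared step, and scales each step by sqrt((E[dx^2] + eps) / (E[g^2] + eps)):

```python
import numpy as np
import theano
import theano.tensor as T
from deepy.trainers.cores.ada_family import ada_family_core

floatX = theano.config.floatX

# Hypothetical toy objective: fit w so that dot(w, x) ~= 1
x = T.vector('x')
w = theano.shared(np.zeros(3, dtype=floatX), name='w')  # a name is required
loss = (T.dot(w, x) - 1.0) ** 2

updates, free_params = ada_family_core([w], [T.grad(loss, w)], method='ADADELTA')
train = theano.function([x], loss, updates=updates)

train(np.ones(3, dtype=floatX))  # one ADADELTA step updates w in place
```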